chore: vendor

2024-08-04 11:06:58 +02:00
parent 2a5985e44e
commit 04aec8232f
3557 changed files with 981078 additions and 1 deletions

vendor/github.com/docker/cli/AUTHORS generated vendored Normal file

@@ -0,0 +1,910 @@
# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT.
# This file lists all contributors to the repository.
# See scripts/docs/generate-authors.sh to make modifications.
A. Lester Buck III <github-reg@nbolt.com>
Aanand Prasad <aanand.prasad@gmail.com>
Aaron L. Xu <liker.xu@foxmail.com>
Aaron Lehmann <alehmann@netflix.com>
Aaron.L.Xu <likexu@harmonycloud.cn>
Abdur Rehman <abdur_rehman@mentor.com>
Abhinandan Prativadi <abhi@docker.com>
Abin Shahab <ashahab@altiscale.com>
Abreto FU <public@abreto.email>
Ace Tang <aceapril@126.com>
Addam Hardy <addam.hardy@gmail.com>
Adolfo Ochagavía <aochagavia92@gmail.com>
Adrian Plata <adrian.plata@docker.com>
Adrien Duermael <adrien@duermael.com>
Adrien Folie <folie.adrien@gmail.com>
Adyanth Hosavalike <ahosavalike@ucsd.edu>
Ahmet Alp Balkan <ahmetb@microsoft.com>
Aidan Feldman <aidan.feldman@gmail.com>
Aidan Hobson Sayers <aidanhs@cantab.net>
AJ Bowen <aj@soulshake.net>
Akhil Mohan <akhil.mohan@mayadata.io>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akim Demaille <akim.demaille@docker.com>
Alan Thompson <cloojure@gmail.com>
Alano Terblanche <alano.terblanche@docker.com>
Albert Callarisa <shark234@gmail.com>
Alberto Roura <mail@albertoroura.com>
Albin Kerouanton <albinker@gmail.com>
Aleksa Sarai <asarai@suse.de>
Aleksander Piotrowski <apiotrowski312@gmail.com>
Alessandro Boch <aboch@tetrationanalytics.com>
Alex Couture-Beil <alex@earthly.dev>
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
Alex Mayer <amayer5125@gmail.com>
Alexander Boyd <alex@opengroove.org>
Alexander Chneerov <achneerov@gmail.com>
Alexander Larsson <alexl@redhat.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexander Ryabov <i@sepa.spb.ru>
Alexandre González <agonzalezro@gmail.com>
Alexey Igrychev <alexey.igrychev@flant.com>
Alexis Couvreur <alexiscouvreur.pro@gmail.com>
Alfred Landrum <alfred.landrum@docker.com>
Ali Rostami <rostami.ali@gmail.com>
Alicia Lauerman <alicia@eta.im>
Allen Sun <allensun.shl@alibaba-inc.com>
Alvin Deng <alvin.q.deng@utexas.edu>
Amen Belayneh <amenbelayneh@gmail.com>
Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com>
Amir Goldstein <amir73il@aquasec.com>
Amit Krishnan <amit.krishnan@oracle.com>
Amit Shukla <amit.shukla@docker.com>
Amy Lindburg <amy.lindburg@docker.com>
Anca Iordache <anca.iordache@docker.com>
Anda Xu <anda.xu@docker.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andreas Köhler <andi5.py@gmx.net>
Andres G. Aragoneses <knocte@gmail.com>
Andres Leon Rangel <aleon1220@gmail.com>
Andrew France <andrew@avito.co.uk>
Andrew Hsu <andrewhsu@docker.com>
Andrew Macpherson <hopscotch23@gmail.com>
Andrew McDonnell <bugs@andrewmcdonnell.net>
Andrew Po <absourd.noise@gmail.com>
Andrew-Zipperer <atzipperer@gmail.com>
Andrey Petrov <andrey.petrov@shazow.net>
Andrii Berehuliak <berkusandrew@gmail.com>
André Martins <aanm90@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Andy Rothfusz <github@developersupport.net>
Anil Madhavapeddy <anil@recoil.org>
Ankush Agarwal <ankushagarwal11@gmail.com>
Anne Henmi <anne.henmi@docker.com>
Anton Polonskiy <anton.polonskiy@gmail.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonis Kalipetis <akalipetis@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com>
Ao Li <la9249@163.com>
Arash Deshmeh <adeshmeh@ca.ibm.com>
Arko Dasgupta <arko@tetrate.io>
Arnaud Porterie <icecrime@gmail.com>
Arnaud Rebillout <elboulangero@gmail.com>
Arthur Peka <arthur.peka@outlook.com>
Ashly Mathew <ashly.mathew@sap.com>
Ashwini Oruganti <ashwini.oruganti@gmail.com>
Aslam Ahemad <aslamahemad@gmail.com>
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
Barnaby Gray <barnaby@pickle.me.uk>
Bastiaan Bakker <bbakker@xebia.com>
BastianHofmann <bastianhofmann@me.com>
Ben Bodenmiller <bbodenmiller@gmail.com>
Ben Bonnefoy <frenchben@docker.com>
Ben Creasy <ben@bencreasy.com>
Ben Firshman <ben@firshman.co.uk>
Benjamin Boudreau <boudreau.benjamin@gmail.com>
Benjamin Böhmke <benjamin@boehmke.net>
Benjamin Nater <me@bn4t.me>
Benoit Sigoure <tsunanet@gmail.com>
Bhumika Bayani <bhumikabayani@gmail.com>
Bill Wang <ozbillwang@gmail.com>
Bin Liu <liubin0329@gmail.com>
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
Bishal Das <bishalhnj127@gmail.com>
Bjorn Neergaard <bjorn.neergaard@docker.com>
Boaz Shuster <ripcurld.github@gmail.com>
Boban Acimovic <boban.acimovic@gmail.com>
Bogdan Anton <contact@bogdananton.ro>
Boris Pruessmann <boris@pruessmann.org>
Brad Baker <brad@brad.fi>
Bradley Cicenas <bradley.cicenas@gmail.com>
Brandon Mitchell <git@bmitch.net>
Brandon Philips <brandon.philips@coreos.com>
Brent Salisbury <brent.salisbury@docker.com>
Bret Fisher <bret@bretfisher.com>
Brian (bex) Exelbierd <bexelbie@redhat.com>
Brian Goff <cpuguy83@gmail.com>
Brian Tracy <brian.tracy33@gmail.com>
Brian Wieder <brian@4wieders.com>
Bruno Sousa <bruno.sousa@docker.com>
Bryan Bess <squarejaw@bsbess.com>
Bryan Boreham <bjboreham@gmail.com>
Bryan Murphy <bmurphy1976@gmail.com>
bryfry <bryon.fryer@gmail.com>
Calvin Liu <flycalvin@qq.com>
Cameron Spear <cameronspear@gmail.com>
Cao Weiwei <cao.weiwei30@zte.com.cn>
Carlo Mion <mion00@gmail.com>
Carlos Alexandro Becker <caarlos0@gmail.com>
Carlos de Paula <me@carlosedp.com>
Casey Korver <casey@korver.dev>
Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Faragher <wyckster@hotmail.com>
Chao Wang <wangchao.fnst@cn.fujitsu.com>
Charles Chan <charleswhchan@users.noreply.github.com>
Charles Law <claw@conduce.com>
Charles Smith <charles.smith@docker.com>
Charlie Drage <charlie@charliedrage.com>
Charlotte Mach <charlotte.mach@fs.lmu.de>
ChaYoung You <yousbe@gmail.com>
Chee Hau Lim <cheehau.lim@mobimeo.com>
Chen Chuanliang <chen.chuanliang@zte.com.cn>
Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
Chen Mingjie <chenmingjie0828@163.com>
Chen Qiu <cheney-90@hotmail.com>
Chris Chinchilla <chris@chrischinchilla.com>
Chris Couzens <ccouzens@gmail.com>
Chris Gavin <chris@chrisgavin.me>
Chris Gibson <chris@chrisg.io>
Chris McKinnel <chrismckinnel@gmail.com>
Chris Snow <chsnow123@gmail.com>
Chris Vermilion <christopher.vermilion@gmail.com>
Chris Weyl <cweyl@alumni.drew.edu>
Christian Persson <saser@live.se>
Christian Stefanescu <st.chris@gmail.com>
Christophe Robin <crobin@nekoo.com>
Christophe Vidal <kriss@krizalys.com>
Christopher Biscardi <biscarch@sketcht.com>
Christopher Crone <christopher.crone@docker.com>
Christopher Jones <tophj@linux.vnet.ibm.com>
Christopher Petito <47751006+krissetto@users.noreply.github.com>
Christopher Petito <chrisjpetito@gmail.com>
Christopher Svensson <stoffus@stoffus.com>
Christy Norman <christy@linux.vnet.ibm.com>
Chun Chen <ramichen@tencent.com>
Clinton Kitson <clintonskitson@gmail.com>
Coenraad Loubser <coenraad@wish.org.za>
Colin Hebert <hebert.colin@gmail.com>
Collin Guarino <collin.guarino@gmail.com>
Colm Hally <colmhally@gmail.com>
Comical Derskeal <27731088+derskeal@users.noreply.github.com>
Conner Crosby <conner@cavcrosby.tech>
Corey Farrell <git@cfware.com>
Corey Quon <corey.quon@docker.com>
Cory Bennet <cbennett@netflix.com>
Cory Snider <csnider@mirantis.com>
Craig Osterhout <craig.osterhout@docker.com>
Craig Wilhite <crwilhit@microsoft.com>
Cristian Staretu <cristian.staretu@gmail.com>
Daehyeok Mun <daehyeok@gmail.com>
Dafydd Crosby <dtcrsby@gmail.com>
Daisuke Ito <itodaisuke00@gmail.com>
dalanlan <dalanlan925@gmail.com>
Damien Nadé <github@livna.org>
Dan Cotora <dan@bluevision.ro>
Danial Gharib <danial.mail.gh@gmail.com>
Daniel Artine <daniel.artine@ufrj.br>
Daniel Cassidy <mail@danielcassidy.me.uk>
Daniel Dao <dqminh@cloudflare.com>
Daniel Farrell <dfarrell@redhat.com>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Goosen <daniel.goosen@surveysampling.com>
Daniel Helfand <dhelfand@redhat.com>
Daniel Hiltgen <daniel.hiltgen@docker.com>
Daniel J Walsh <dwalsh@redhat.com>
Daniel Nephin <dnephin@docker.com>
Daniel Norberg <dano@spotify.com>
Daniel Watkins <daniel@daniel-watkins.co.uk>
Daniel Zhang <jmzwcn@gmail.com>
Daniil Nikolenko <qoo2p5@gmail.com>
Danny Berger <dpb587@gmail.com>
Darren Shepherd <darren.s.shepherd@gmail.com>
Darren Stahl <darst@microsoft.com>
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
Dave Goodchild <buddhamagnet@gmail.com>
Dave Henderson <dhenderson@gmail.com>
Dave Tucker <dt@docker.com>
David Alvarez <david.alvarez@flyeralarm.com>
David Beitey <david@davidjb.com>
David Calavera <david.calavera@gmail.com>
David Cramer <davcrame@cisco.com>
David Dooling <dooling@gmail.com>
David Gageot <david@gageot.net>
David Karlsson <david.karlsson@docker.com>
David le Blanc <systemmonkey42@users.noreply.github.com>
David Lechner <david@lechnology.com>
David Scott <dave@recoil.org>
David Sheets <dsheets@docker.com>
David Williamson <david.williamson@docker.com>
David Xia <dxia@spotify.com>
David Young <yangboh@cn.ibm.com>
Deng Guangxing <dengguangxing@huawei.com>
Denis Defreyne <denis@soundcloud.com>
Denis Gladkikh <denis@gladkikh.email>
Denis Ollier <larchunix@users.noreply.github.com>
Dennis Docter <dennis@d23.nl>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Derek McGowan <derek@mcg.dev>
Des Preston <despreston@gmail.com>
Deshi Xiao <dxiao@redhat.com>
Dharmit Shah <shahdharmit@gmail.com>
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
Dieter Reuter <dieter.reuter@me.com>
Dima Stopel <dima@twistlock.com>
Dimitry Andric <d.andric@activevideo.com>
Ding Fei <dingfei@stars.org.cn>
Diogo Monica <diogo@docker.com>
Djordje Lukic <djordje.lukic@docker.com>
Dmitriy Fishman <fishman.code@gmail.com>
Dmitry Gusev <dmitry.gusev@gmail.com>
Dmitry Smirnov <onlyjob@member.fsf.org>
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
Dominik Braun <dominik.braun@nbsp.de>
Don Kjer <don.kjer@gmail.com>
Dong Chen <dongluo.chen@docker.com>
DongGeon Lee <secmatth1996@gmail.com>
Doug Davis <dug@us.ibm.com>
Drew Erny <derny@mirantis.com>
Ed Costello <epc@epcostello.com>
Ed Morley <501702+edmorley@users.noreply.github.com>
Elango Sivanandam <elango.siva@docker.com>
Eli Uriegas <eli.uriegas@docker.com>
Eli Uriegas <seemethere101@gmail.com>
Elias Faxö <elias.faxo@tre.se>
Elliot Luo <956941328@qq.com>
Eric Bode <eric.bode@foundries.io>
Eric Curtin <ericcurtin17@gmail.com>
Eric Engestrom <eric@engestrom.ch>
Eric G. Noriega <enoriega@vizuri.com>
Eric Rosenberg <ehaydenr@gmail.com>
Eric Sage <eric.david.sage@gmail.com>
Eric-Olivier Lamey <eo@lamey.me>
Erica Windisch <erica@windisch.us>
Erik Hollensbe <github@hollensbe.org>
Erik Humphrey <erik.humphrey@carleton.ca>
Erik St. Martin <alakriti@gmail.com>
Essam A. Hassan <es.hassan187@gmail.com>
Ethan Haynes <ethanhaynes@alumni.harvard.edu>
Euan Kemp <euank@euank.com>
Eugene Yakubovich <eugene.yakubovich@coreos.com>
Evan Allrich <evan@unguku.com>
Evan Hazlett <ejhazlett@gmail.com>
Evan Krall <krall@yelp.com>
Evan Lezar <elezar@nvidia.com>
Evelyn Xu <evelynhsu21@gmail.com>
Everett Toews <everett.toews@rackspace.com>
Fabio Falci <fabiofalci@gmail.com>
Fabrizio Soppelsa <fsoppelsa@mirantis.com>
Felix Geyer <debfx@fobos.de>
Felix Hupfeld <felix@quobyte.com>
Felix Rabe <felix@rabe.io>
fezzik1620 <fezzik1620@users.noreply.github.com>
Filip Jareš <filipjares@gmail.com>
Flavio Crisciani <flavio.crisciani@docker.com>
Florian Klein <florian.klein@free.fr>
Forest Johnson <fjohnson@peoplenetonline.com>
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
François Scala <francois.scala@swiss-as.com>
Fred Lifton <fred.lifton@docker.com>
Frederic Hemberger <mail@frederic-hemberger.de>
Frederick F. Kautz IV <fkautz@redhat.com>
Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
Frieder Bluemle <frieder.bluemle@gmail.com>
Gabriel Gore <gabgore@cisco.com>
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
Gabriela Georgieva <gabriela.georgieva@docker.com>
Gaetan de Villele <gdevillele@gmail.com>
Gang Qiao <qiaohai8866@gmail.com>
Gary Schaetz <gary@schaetzkc.com>
Genki Takiuchi <genki@s21g.com>
George MacRorie <gmacr31@gmail.com>
George Margaritis <gmargaritis@protonmail.com>
George Xie <georgexsh@gmail.com>
Gianluca Borello <g.borello@gmail.com>
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
Gio d'Amelio <giodamelio@gmail.com>
Gleb Stsenov <gleb.stsenov@gmail.com>
Goksu Toprak <goksu.toprak@docker.com>
Gou Rao <gou@portworx.com>
Govind Rai <raigovind93@gmail.com>
Grace Choi <grace.54109@gmail.com>
Graeme Wiebe <graeme.wiebe@gmail.com>
Grant Reaber <grant.reaber@gmail.com>
Greg Pflaum <gpflaum@users.noreply.github.com>
Gsealy <jiaojingwei1001@hotmail.com>
Guilhem Lettron <guilhem+github@lettron.fr>
Guillaume J. Charmes <guillaume.charmes@docker.com>
Guillaume Le Floch <glfloch@gmail.com>
Guillaume Tardif <guillaume.tardif@gmail.com>
gwx296173 <gaojing3@huawei.com>
Günther Jungbluth <gunther@gameslabs.net>
Hakan Özler <hakan.ozler@kodcu.com>
Hao Zhang <21521210@zju.edu.cn>
Harald Albers <github@albersweb.de>
Harold Cooper <hrldcpr@gmail.com>
Harry Zhang <harryz@hyper.sh>
He Simei <hesimei@zju.edu.cn>
Hector S <hfsam88@gmail.com>
Helen Xie <chenjg@harmonycloud.cn>
Henning Sprang <henning.sprang@gmail.com>
Henry N <henrynmail-github@yahoo.de>
Hernan Garcia <hernandanielg@gmail.com>
Hongbin Lu <hongbin034@gmail.com>
Hu Keping <hukeping@huawei.com>
Huayi Zhang <irachex@gmail.com>
Hugo Chastel <Hugo-C@users.noreply.github.com>
Hugo Gabriel Eyherabide <hugogabriel.eyherabide@gmail.com>
huqun <huqun@zju.edu.cn>
Huu Nguyen <huu@prismskylabs.com>
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
Iain Samuel McLean Elder <iain@isme.es>
Ian Campbell <ian.campbell@docker.com>
Ian Philpot <ian.philpot@microsoft.com>
Ignacio Capurro <icapurrofagian@gmail.com>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Ilya Khlopotov <ilya.khlopotov@gmail.com>
Ilya Sotkov <ilya@sotkov.com>
Ioan Eugen Stan <eu@ieugen.ro>
Isabel Jimenez <contact.isabeljimenez@gmail.com>
Ivan Grcic <igrcic@gmail.com>
Ivan Grund <ivan.grund@gmail.com>
Ivan Markin <sw@nogoegst.net>
Jacob Atzen <jacob@jacobatzen.dk>
Jacob Tomlinson <jacob@tom.linson.uk>
Jacopo Rigoli <rigoli.jacopo@gmail.com>
Jaivish Kothari <janonymous.codevulture@gmail.com>
Jake Lambert <jake.lambert@volusion.com>
Jake Sanders <jsand@google.com>
Jake Stokes <contactjake@developerjake.com>
Jakub Panek <me@panekj.dev>
James Nesbitt <james.nesbitt@wunderkraut.com>
James Turnbull <james@lovedthanlost.net>
Jamie Hannaford <jamie@limetree.org>
Jan Koprowski <jan.koprowski@gmail.com>
Jan Pazdziora <jpazdziora@redhat.com>
Jan-Jaap Driessen <janjaapdriessen@gmail.com>
Jana Radhakrishnan <mrjana@docker.com>
Jared Hocutt <jaredh@netapp.com>
Jasmine Hegman <jasmine@jhegman.com>
Jason Hall <jason@chainguard.dev>
Jason Heiss <jheiss@aput.net>
Jason Plum <jplum@devonit.com>
Jay Kamat <github@jgkamat.33mail.com>
Jean Lecordier <jeanlecordier@hotmail.fr>
Jean Rouge <rougej+github@gmail.com>
Jean-Christophe Sirot <jean-christophe.sirot@docker.com>
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
Jeff Lindsay <progrium@gmail.com>
Jeff Nickoloff <jeff.nickoloff@gmail.com>
Jeff Silberman <jsilberm@gmail.com>
Jennings Zhang <jenni_zh@protonmail.com>
Jeremy Chambers <jeremy@thehipbot.com>
Jeremy Unruh <jeremybunruh@gmail.com>
Jeremy Yallop <yallop@docker.com>
Jeroen Franse <jeroenfranse@gmail.com>
Jesse Adametz <jesseadametz@gmail.com>
Jessica Frazelle <jess@oxide.computer>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
Jie Luo <luo612@zju.edu.cn>
Jilles Oldenbeuving <ojilles@gmail.com>
Jim Chen <njucjc@gmail.com>
Jim Galasyn <jim.galasyn@docker.com>
Jim Lin <b04705003@ntu.edu.tw>
Jimmy Leger <jimmy.leger@gmail.com>
Jimmy Song <rootsongjc@gmail.com>
jimmyxian <jimmyxian2004@yahoo.com.cn>
Jintao Zhang <zhangjintao9020@gmail.com>
Joao Fernandes <joao.fernandes@docker.com>
Joe Abbey <joe.abbey@gmail.com>
Joe Doliner <jdoliner@pachyderm.io>
Joe Gordon <joe.gordon0@gmail.com>
Joel Handwell <joelhandwell@gmail.com>
Joey Geiger <jgeiger@gmail.com>
Joffrey F <joffrey@docker.com>
Johan Euphrosine <proppy@google.com>
Johannes 'fish' Ziemke <github@freigeist.org>
John Feminella <jxf@jxf.me>
John Harris <john@johnharris.io>
John Howard <github@lowenna.com>
John Howard <howardjohn@google.com>
John Laswell <john.n.laswell@gmail.com>
John Maguire <jmaguire@duosecurity.com>
John Mulhausen <john@docker.com>
John Starks <jostarks@microsoft.com>
John Stephens <johnstep@docker.com>
John Tims <john.k.tims@gmail.com>
John V. Martinez <jvmatl@gmail.com>
John Willis <john.willis@docker.com>
Jon Johnson <jonjohnson@google.com>
Jon Zeolla <zeolla@gmail.com>
Jonatas Baldin <jonatas.baldin@gmail.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Lee <jonjohn1232009@gmail.com>
Jonathan Lomas <jonathan@floatinglomas.ca>
Jonathan McCrohan <jmccrohan@gmail.com>
Jonathan Warriss-Simmons <misterws@diogenes.ws>
Jonh Wendell <jonh.wendell@redhat.com>
Jordan Jennings <jjn2009@gmail.com>
Jorge Vallecillo <jorgevallecilloc@gmail.com>
Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com>
Joseph Kern <jkern@semafour.net>
Josh Bodah <jb3689@yahoo.com>
Josh Chorlton <jchorlton@gmail.com>
Josh Hawn <josh.hawn@docker.com>
Josh Horwitz <horwitz@addthis.com>
Josh Soref <jsoref@gmail.com>
Julian <gitea+julian@ic.thejulian.uk>
Julien Barbier <write0@gmail.com>
Julien Kassar <github@kassisol.com>
Julien Maitrehenry <julien.maitrehenry@me.com>
Justas Brazauskas <brazauskasjustas@gmail.com>
Justin Chadwell <me@jedevc.com>
Justin Cormack <justin.cormack@docker.com>
Justin Simonelis <justin.p.simonelis@gmail.com>
Justyn Temme <justyntemme@gmail.com>
Jyrki Puttonen <jyrkiput@gmail.com>
Jérémie Drouet <jeremie.drouet@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@docker.com>
Jörg Thalheim <joerg@higgsboson.tk>
Kai Blin <kai@samba.org>
Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
Kara Alexandra <kalexandra@us.ibm.com>
Kareem Khazem <karkhaz@karkhaz.com>
Karthik Nayak <Karthik.188@gmail.com>
Kat Samperi <kat.samperi@gmail.com>
Kathryn Spiers <kathryn@spiers.me>
Katie McLaughlin <katie@glasnt.com>
Ke Xu <leonhartx.k@gmail.com>
Kei Ohmura <ohmura.kei@gmail.com>
Keith Hudgins <greenman@greenman.org>
Kelton Bassingthwaite <KeltonBassingthwaite@gmail.com>
Ken Cochrane <kencochrane@gmail.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
Kevin Alvarez <github@crazymax.dev>
Kevin Burke <kev@inburke.com>
Kevin Feyrer <kevin.feyrer@btinternet.com>
Kevin Kern <kaiwentan@harmonycloud.cn>
Kevin Kirsche <Kev.Kirsche+GitHub@gmail.com>
Kevin Meredith <kevin.m.meredith@gmail.com>
Kevin Richardson <kevin@kevinrichardson.co>
Kevin Woblick <mail@kovah.de>
khaled souf <khaled.souf@gmail.com>
Kim Eik <kim@heldig.org>
Kir Kolyshkin <kolyshkin@gmail.com>
Kirill A. Korinsky <kirill@korins.ky>
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
Krasi Georgiev <krasi@vip-consult.solutions>
Kris-Mikael Krister <krismikael@protonmail.com>
Kun Zhang <zkazure@gmail.com>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
Kyle Mitofsky <Kylemit@gmail.com>
Lachlan Cooper <lachlancooper@gmail.com>
Lai Jiangshan <jiangshanlai@gmail.com>
Lars Kellogg-Stedman <lars@redhat.com>
Laura Brehm <laurabrehm@hey.com>
Laura Frank <ljfrank@gmail.com>
Laurent Erignoux <lerignoux@gmail.com>
Lee Gaines <eightlimbed@gmail.com>
Lei Jitang <leijitang@huawei.com>
Lennie <github@consolejunkie.net>
Leo Gallucci <elgalu3@gmail.com>
Leonid Skorospelov <leosko94@gmail.com>
Lewis Daly <lewisdaly@me.com>
Li Fu Bang <lifubang@acmcoder.com>
Li Yi <denverdino@gmail.com>
Li Yi <weiyuan.yl@alibaba-inc.com>
Liang-Chi Hsieh <viirya@gmail.com>
Lihua Tang <lhtang@alauda.io>
Lily Guo <lily.guo@docker.com>
Lin Lu <doraalin@163.com>
Linus Heckemann <lheckemann@twig-world.com>
Liping Xue <lipingxue@gmail.com>
Liron Levin <liron@twistlock.com>
liwenqi <vikilwq@zju.edu.cn>
lixiaobing10051267 <li.xiaobing1@zte.com.cn>
Lloyd Dewolf <foolswisdom@gmail.com>
Lorenzo Fontana <lo@linux.com>
Louis Opter <kalessin@kalessin.fr>
Luca Favatella <luca.favatella@erlang-solutions.com>
Luca Marturana <lucamarturana@gmail.com>
Lucas Chan <lucas-github@lucaschan.com>
Luis Henrique Mulinari <luis.mulinari@gmail.com>
Luka Hartwig <mail@lukahartwig.de>
Lukas Heeren <lukas-heeren@hotmail.com>
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
Lydell Manganti <LydellManganti@users.noreply.github.com>
Lénaïc Huard <lhuard@amadeus.com>
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
Mabin <bin.ma@huawei.com>
Maciej Kalisz <maciej.d.kalisz@gmail.com>
Madhav Puri <madhav.puri@gmail.com>
Madhu Venugopal <madhu@socketplane.io>
Madhur Batra <madhurbatra097@gmail.com>
Malte Janduda <mail@janduda.net>
Manjunath A Kumatagi <mkumatag@in.ibm.com>
Mansi Nahar <mmn4185@rit.edu>
mapk0y <mapk0y@gmail.com>
Marc Bihlmaier <marc.bihlmaier@reddoxx.com>
Marc Cornellà <hello@mcornella.com>
Marco Mariani <marco.mariani@alterway.fr>
Marco Spiess <marco.spiess@hotmail.de>
Marco Vedovati <mvedovati@suse.com>
Marcus Martins <marcus@docker.com>
Marianna Tessel <mtesselh@gmail.com>
Marius Ileana <marius.ileana@gmail.com>
Marius Meschter <marius@meschter.me>
Marius Sturm <marius@graylog.com>
Mark Oates <fl0yd@me.com>
Marsh Macy <marsma@microsoft.com>
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
Mary Anthony <mary.anthony@docker.com>
Mason Fish <mason.fish@docker.com>
Mason Malone <mason.malone@gmail.com>
Mateusz Major <apkd@users.noreply.github.com>
Mathias Duedahl <64321057+Lussebullen@users.noreply.github.com>
Mathieu Champlon <mathieu.champlon@docker.com>
Mathieu Rollet <matletix@gmail.com>
Matt Gucci <matt9ucci@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
Matteo Orefice <matteo.orefice@bites4bits.software>
Matthew Heon <mheon@redhat.com>
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
Mauro Porras P <mauroporrasp@gmail.com>
Max Shytikov <mshytikov@gmail.com>
Max-Julian Pogner <max-julian@pogner.at>
Maxime Petazzoni <max@signalfuse.com>
Maximillian Fan Xavier <maximillianfx@gmail.com>
Mei ChunTao <mei.chuntao@zte.com.cn>
Melroy van den Berg <melroy@melroy.org>
Metal <2466052+tedhexaflow@users.noreply.github.com>
Micah Zoltu <micah@newrelic.com>
Michael A. Smith <michael@smith-li.com>
Michael Bridgen <mikeb@squaremobius.net>
Michael Crosby <crosbymichael@gmail.com>
Michael Friis <friism@gmail.com>
Michael Irwin <mikesir87@gmail.com>
Michael Käufl <docker@c.michael-kaeufl.de>
Michael Prokop <github@michael-prokop.at>
Michael Scharf <github@scharf.gr>
Michael Spetsiotis <michael_spets@hotmail.com>
Michael Steinert <mike.steinert@gmail.com>
Michael West <mwest@mdsol.com>
Michal Minář <miminar@redhat.com>
Michał Czeraszkiewicz <czerasz@gmail.com>
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com>
Mihai Borobocea <MihaiBorob@gmail.com>
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
Mike Brown <brownwm@us.ibm.com>
Mike Casas <mkcsas0@gmail.com>
Mike Dalton <mikedalton@github.com>
Mike Danese <mikedanese@google.com>
Mike Dillon <mike@embody.org>
Mike Goelzer <mike.goelzer@docker.com>
Mike MacCana <mike.maccana@gmail.com>
mikelinjie <294893458@qq.com>
Mikhail Vasin <vasin@cloud-tv.ru>
Milind Chawre <milindchawre@gmail.com>
Mindaugas Rukas <momomg@gmail.com>
Miroslav Gula <miroslav.gula@naytrolabs.com>
Misty Stanley-Jones <misty@docker.com>
Mohammad Banikazemi <mb@us.ibm.com>
Mohammed Aaqib Ansari <maaquib@gmail.com>
Mohini Anne Dsouza <mohini3917@gmail.com>
Moorthy RS <rsmoorthy@gmail.com>
Morgan Bauer <mbauer@us.ibm.com>
Morten Hekkvang <morten.hekkvang@sbab.se>
Morten Linderud <morten@linderud.pw>
Moysés Borges <moysesb@gmail.com>
Mozi <29089388+pzhlkj6612@users.noreply.github.com>
Mrunal Patel <mrunalp@gmail.com>
muicoder <muicoder@gmail.com>
Murukesh Mohanan <murukesh.mohanan@gmail.com>
Muthukumar R <muthur@gmail.com>
Máximo Cuadros <mcuadros@gmail.com>
Mårten Cassel <marten.cassel@gmail.com>
Nace Oroz <orkica@gmail.com>
Nahum Shalman <nshalman@omniti.com>
Nalin Dahyabhai <nalin@redhat.com>
Nao YONASHIRO <owan.orisano@gmail.com>
Nassim 'Nass' Eddequiouaq <eddequiouaq.nassim@gmail.com>
Natalie Parker <nparker@omnifone.com>
Nate Brennand <nate.brennand@clever.com>
Nathan Hsieh <hsieh.nathan@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com>
Nathan McCauley <nathan.mccauley@docker.com>
Neil Peterson <neilpeterson@outlook.com>
Nick Adcock <nick.adcock@docker.com>
Nick Santos <nick.santos@docker.com>
Nick Sieger <nick@nicksieger.com>
Nico Stapelbroek <nstapelbroek@gmail.com>
Nicola Kabar <nicolaka@gmail.com>
Nicolas Borboën <ponsfrilus@gmail.com>
Nicolas De Loof <nicolas.deloof@gmail.com>
Nikhil Chawla <chawlanikhil24@gmail.com>
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
Nikolay Milovanov <nmil@itransformers.net>
Nir Soffer <nsoffer@redhat.com>
Nishant Totla <nishanttotla@gmail.com>
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
Noah Treuhaft <noah.treuhaft@docker.com>
O.S. Tezer <ostezer@gmail.com>
Oded Arbel <oded@geek.co.il>
Odin Ugedal <odin@ugedal.com>
ohmystack <jun.jiang02@ele.me>
OKA Naoya <git@okanaoya.com>
Oliver Pomeroy <oppomeroy@gmail.com>
Olle Jonsson <olle.jonsson@gmail.com>
Olli Janatuinen <olli.janatuinen@gmail.com>
Oscar Wieman <oscrx@icloud.com>
Otto Kekäläinen <otto@seravo.fi>
Ovidio Mallo <ovidio.mallo@gmail.com>
Pascal Borreli <pascal@borreli.com>
Patrick Böänziger <patrick.baenziger@bsi-software.com>
Patrick Daigle <114765035+pdaig@users.noreply.github.com>
Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick Lang <plang@microsoft.com>
Paul <paul9869@gmail.com>
Paul Kehrer <paul.l.kehrer@gmail.com>
Paul Lietar <paul@lietar.net>
Paul Mulders <justinkb@gmail.com>
Paul Seyfert <pseyfert.mathphys@gmail.com>
Paul Weaver <pauweave@cisco.com>
Pavel Pospisil <pospispa@gmail.com>
Paweł Gronowski <pawel.gronowski@docker.com>
Paweł Pokrywka <pepawel@users.noreply.github.com>
Paweł Szczekutowicz <pszczekutowicz@gmail.com>
Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
Per Lundberg <perlun@gmail.com>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Edge <peter.edge@gmail.com>
Peter Hsu <shhsu@microsoft.com>
Peter Jaffe <pjaffe@nevo.com>
Peter Kehl <peter.kehl@gmail.com>
Peter Nagy <xificurC@gmail.com>
Peter Salvatore <peter@psftw.com>
Peter Waller <p@pwaller.net>
Phil Estes <estesp@gmail.com>
Philip Alexander Etling <paetling@gmail.com>
Philipp Gillé <philipp.gille@gmail.com>
Philipp Schmied <pschmied@schutzwerk.com>
Phong Tran <tran.pho@northeastern.edu>
pidster <pid@pidster.com>
Pieter E Smit <diepes@github.com>
pixelistik <pixelistik@users.noreply.github.com>
Pratik Karki <prertik@outlook.com>
Prayag Verma <prayag.verma@gmail.com>
Preston Cowley <preston.cowley@sony.com>
Pure White <daniel48@126.com>
Qiang Huang <h.huangqiang@huawei.com>
Qinglan Peng <qinglanpeng@zju.edu.cn>
QQ喵 <gqqnb2005@gmail.com>
qudongfang <qudongfang@gmail.com>
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Rahul Kadyan <hi@znck.me>
Rahul Zoldyck <rahulzoldyck@gmail.com>
Ravi Shekhar Jethani <rsjethani@gmail.com>
Ray Tsang <rayt@google.com>
Reficul <xuzhenglun@gmail.com>
Remy Suen <remy.suen@gmail.com>
Renaud Gaubert <rgaubert@nvidia.com>
Ricardo N Feliciano <FelicianoTech@gmail.com>
Rich Moyse <rich@moyse.us>
Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com>
Richard Mathie <richard.mathie@amey.co.uk>
Richard Scothern <richard.scothern@gmail.com>
Rick Wieman <git@rickw.nl>
Ritesh H Shukla <sritesh@vmware.com>
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Rob Gulewich <rgulewich@netflix.com>
Rob Murray <rob.murray@docker.com>
Robert Wallis <smilingrob@gmail.com>
Robin Naundorf <r.naundorf@fh-muenster.de>
Robin Speekenbrink <robin@kingsquare.nl>
Roch Feuillade <roch.feuillade@pandobac.com>
Rodolfo Ortiz <rodolfo.ortiz@definityfirst.com>
Rogelio Canedo <rcanedo@mappy.priv>
Rohan Verma <hello@rohanverma.net>
Roland Kammerer <roland.kammerer@linbit.com>
Roman Dudin <katrmr@gmail.com>
Rory Hunter <roryhunter2@gmail.com>
Ross Boucher <rboucher@gmail.com>
Rubens Figueiredo <r.figueiredo.52@gmail.com>
Rui Cao <ruicao@alauda.io>
Rui JingAn <quiterace@gmail.com>
Ryan Belgrave <rmb1993@gmail.com>
Ryan Detzel <ryan.detzel@gmail.com>
Ryan Stelly <ryan.stelly@live.com>
Ryan Wilson-Perkin <ryanwilsonperkin@gmail.com>
Ryan Zhang <ryan.zhang@docker.com>
Sainath Grandhi <sainath.grandhi@intel.com>
Sakeven Jiang <jc5930@sina.cn>
Sally O'Malley <somalley@redhat.com>
Sam Neirinck <sam@samneirinck.com>
Sam Thibault <sam.thibault@docker.com>
Samarth Shah <samashah@microsoft.com>
Sambuddha Basu <sambuddhabasu1@gmail.com>
Sami Tabet <salph.tabet@gmail.com>
Samuel Cochran <sj26@sj26.com>
Samuel Karp <skarp@amazon.com>
Sandro Jäckel <sandro.jaeckel@gmail.com>
Santhosh Manohar <santhosh@docker.com>
Sargun Dhillon <sargun@netflix.com>
Saswat Bhattacharya <sas.saswat@gmail.com>
Saurabh Kumar <saurabhkumar0184@gmail.com>
Scott Brenner <scott@scottbrenner.me>
Scott Collier <emailscottcollier@gmail.com>
Sean Christopherson <sean.j.christopherson@intel.com>
Sean Rodman <srodman7689@gmail.com>
Sebastiaan van Stijn <github@gone.nl>
Sergey Tryuber <Sergeant007@users.noreply.github.com>
Serhat Gülçiçek <serhat25@gmail.com>
Sevki Hasirci <s@sevki.org>
Shaun Kaasten <shaunk@gmail.com>
Sheng Yang <sheng@yasker.org>
Shijiang Wei <mountkin@gmail.com>
Shishir Mahajan <shishir.mahajan@redhat.com>
Shoubhik Bose <sbose78@gmail.com>
Shukui Yang <yangshukui@huawei.com>
Sian Lerk Lau <kiawin@gmail.com>
Sidhartha Mani <sidharthamn@gmail.com>
sidharthamani <sid@rancher.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Simei He <hesimei@zju.edu.cn>
Simon Ferquel <simon.ferquel@docker.com>
Simon Heimberg <simon.heimberg@heimberg-ea.ch>
Sindhu S <sindhus@live.in>
Slava Semushin <semushin@redhat.com>
Solomon Hykes <solomon@docker.com>
Song Gao <song@gao.io>
Spencer Brown <spencer@spencerbrown.org>
Spring Lee <xi.shuai@outlook.com>
squeegels <lmscrewy@gmail.com>
Srini Brahmaroutu <srbrahma@us.ibm.com>
Stefan S. <tronicum@user.github.com>
Stefan Scherer <stefan.scherer@docker.com>
Stefan Weil <sw@weilnetz.de>
Stephane Jeandeaux <stephane.jeandeaux@gmail.com>
Stephen Day <stevvooe@gmail.com>
Stephen Rust <srust@blockbridge.com>
Steve Durrheimer <s.durrheimer@gmail.com>
Steve Richards <steve.richards@docker.com>
Steven Burgess <steven.a.burgess@hotmail.com>
Stoica-Marcu Floris-Andrei <floris.sm@gmail.com>
Subhajit Ghosh <isubuz.g@gmail.com>
Sun Jianbo <wonderflow.sun@gmail.com>
Sune Keller <absukl@almbrand.dk>
Sungwon Han <sungwon.han@navercorp.com>
Sunny Gogoi <indiasuny000@gmail.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Sébastien HOUZÉ <cto@verylastroom.com>
T K Sourabh <sourabhtk37@gmail.com>
TAGOMORI Satoshi <tagomoris@gmail.com>
taiji-tech <csuhqg@foxmail.com>
Takeshi Koenuma <t.koenuma2@gmail.com>
Takuya Noguchi <takninnovationresearch@gmail.com>
Taylor Jones <monitorjbl@gmail.com>
Teiva Harsanyi <t.harsanyi@thebeat.co>
Tejaswini Duggaraju <naduggar@microsoft.com>
Tengfei Wang <tfwang@alauda.io>
Teppei Fukuda <knqyf263@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thibault Coupin <thibault.coupin@gmail.com>
Thomas Gazagnaire <thomas@gazagnaire.org>
Thomas Krzero <thomas.kovatchitch@gmail.com>
Thomas Leonard <thomas.leonard@docker.com>
Thomas Léveil <thomasleveil@gmail.com>
Thomas Riccardi <thomas@deepomatic.com>
Thomas Swift <tgs242@gmail.com>
Tianon Gravi <admwiggin@gmail.com>
Tianyi Wang <capkurmagati@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tim Dettrick <t.dettrick@uq.edu.au>
Tim Hockin <thockin@google.com>
Tim Sampson <tim@sampson.fi>
Tim Smith <timbot@google.com>
Tim Waugh <twaugh@redhat.com>
Tim Welsh <timothy.welsh@docker.com>
Tim Wraight <tim.wraight@tangentlabs.co.uk>
timfeirg <kkcocogogo@gmail.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
Tobias Bradtke <webwurst@gmail.com>
Tobias Gesellchen <tobias@gesellix.de>
Todd Whiteman <todd.whiteman@joyent.com>
Tom Denham <tom@tomdee.co.uk>
Tom Fotherby <tom+github@peopleperhour.com>
Tom Klingenberg <tklingenberg@lastflood.net>
Tom Milligan <code@tommilligan.net>
Tom X. Tobin <tomxtobin@tomxtobin.com>
Tomas Bäckman <larstomas@gmail.com>
Tomas Tomecek <ttomecek@redhat.com>
Tomasz Kopczynski <tomek@kopczynski.net.pl>
Tomáš Hrčka <thrcka@redhat.com>
Tony Abboud <tdabboud@hotmail.com>
Tõnis Tiigi <tonistiigi@gmail.com>
Trapier Marshall <trapier.marshall@docker.com>
Travis Cline <travis.cline@gmail.com>
Tristan Carel <tristan@cogniteev.com>
Tycho Andersen <tycho@docker.com>
Tycho Andersen <tycho@tycho.ws>
uhayate <uhayate.gong@daocloud.io>
Ulrich Bareth <ulrich.bareth@gmail.com>
Ulysses Souza <ulysses.souza@docker.com>
Umesh Yadav <umesh4257@gmail.com>
Vaclav Struhar <struharv@gmail.com>
Valentin Lorentz <progval+git@progval.net>
Vardan Pogosian <vardan.pogosyan@gmail.com>
Venkateswara Reddy Bukkasamudram <bukkasamudram@outlook.com>
Veres Lajos <vlajos@gmail.com>
Victor Vieux <victor.vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Viktor Stanchev <me@viktorstanchev.com>
Ville Skyttä <ville.skytta@iki.fi>
Vimal Raghubir <vraghubir0418@gmail.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <Vincent.Bernat@exoscale.ch>
Vincent Demeester <vincent.demeester@docker.com>
Vincent Woo <me@vincentwoo.com>
Vishnu Kannan <vishnuk@google.com>
Vivek Goyal <vgoyal@redhat.com>
Wang Jie <wangjie5@chinaskycloud.com>
Wang Lei <wanglei@tenxcloud.com>
Wang Long <long.wanglong@huawei.com>
Wang Ping <present.wp@icloud.com>
Wang Xing <hzwangxing@corp.netease.com>
Wang Yuexiao <wang.yuexiao@zte.com.cn>
Wang Yumu <37442693@qq.com>
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
Wayne Song <wsong@docker.com>
Wen Cheng Ma <wenchma@cn.ibm.com>
Wenzhi Liang <wenzhi.liang@gmail.com>
Wes Morgan <cap10morgan@gmail.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
William Henry <whenry@redhat.com>
Xianglin Gao <xlgao@zju.edu.cn>
Xiaodong Liu <liuxiaodong@loongson.cn>
Xiaodong Zhang <a4012017@sina.com>
Xiaoxi He <xxhe@alauda.io>
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
Xuecong Liao <satorulogic@gmail.com>
Yan Feng <yanfeng2@huawei.com>
Yanqiang Miao <miao.yanqiang@zte.com.cn>
Yassine Tijani <yasstij11@gmail.com>
Yi EungJun <eungjun.yi@navercorp.com>
Ying Li <ying.li@docker.com>
Yong Tang <yong.tang.github@outlook.com>
Yosef Fertel <yfertel@gmail.com>
Yu Peng <yu.peng36@zte.com.cn>
Yuan Sun <sunyuan3@huawei.com>
Yucheng Wu <wyc123wyc@gmail.com>
Yue Zhang <zy675793960@yeah.net>
Yunxiang Huang <hyxqshk@vip.qq.com>
Zachary Romero <zacromero3@gmail.com>
Zander Mackie <zmackie@gmail.com>
zebrilee <zebrilee@gmail.com>
Zeel B Patel <patel_zeel@iitgn.ac.in>
Zhang Kun <zkazure@gmail.com>
Zhang Wei <zhangwei555@huawei.com>
Zhang Wentao <zhangwentao234@huawei.com>
ZhangHang <stevezhang2014@gmail.com>
zhenghenghuo <zhenghenghuo@zju.edu.cn>
Zhiwei Liang <zliang@akamai.com>
Zhou Hao <zhouhao@cn.fujitsu.com>
Zhoulin Xie <zhoulin.xie@daocloud.io>
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
Zhuo Zhi <h.dwwwwww@gmail.com>
Álex González <agonzalezro@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
Átila Camurça Alves <camurca.home@gmail.com>
Александр Менщиков <__Singleton__@hackerdom.ru>
徐俊杰 <paco.xu@daocloud.io>

vendor/github.com/docker/cli/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/docker/cli/NOTICE generated vendored Normal file

@@ -0,0 +1,19 @@
Docker
Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/creack/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.


@@ -0,0 +1,18 @@
package hooks
import (
"fmt"
"io"
"github.com/morikuni/aec"
)
func PrintNextSteps(out io.Writer, messages []string) {
if len(messages) == 0 {
return
}
fmt.Fprintln(out, aec.Bold.Apply("\nWhat's next:"))
for _, n := range messages {
_, _ = fmt.Fprintf(out, " %s\n", n)
}
}


@@ -0,0 +1,116 @@
package hooks
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"text/template"
"github.com/spf13/cobra"
)
type HookType int
const (
NextSteps = iota
)
// HookMessage represents a plugin hook response. Plugins
// declaring support for CLI hooks need to print a json
// representation of this type when their hook subcommand
// is invoked.
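// For illustration, a hypothetical plugin's hook subcommand might print a
// payload such as:
//
// {"Type": 0, "Template": "inspect it with docker inspect {{arg . 0}}"}
//
// where Type 0 corresponds to NextSteps (currently the only hook type) and
// the Template is later expanded by ParseTemplate.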
type HookMessage struct {
Type HookType
Template string
}
// TemplateReplaceSubcommandName returns a hook template string
// that will be replaced by the CLI subcommand being executed
//
// Example:
//
// "you ran the subcommand: " + TemplateReplaceSubcommandName()
//
// when being executed after the command:
// `docker run --name "my-container" alpine`
// will result in the message:
// `you ran the subcommand: run`
func TemplateReplaceSubcommandName() string {
return hookTemplateCommandName
}
// TemplateReplaceFlagValue returns a hook template string
// that will be replaced by the flags value.
//
// Example:
//
// "you ran a container named: " + TemplateReplaceFlagValue("name")
//
// when being executed after the command:
// `docker run --name "my-container" alpine`
// will result in the message:
// `you ran a container named: my-container`
func TemplateReplaceFlagValue(flag string) string {
return fmt.Sprintf(hookTemplateFlagValue, flag)
}
// TemplateReplaceArg takes an index i and returns a hook
// template string that the CLI will replace with the i-th
// argument, after processing the passed flags.
//
// Example:
//
// "run this image with `docker run " + TemplateReplaceArg(0) + "`"
//
// when being executed after the command:
// `docker pull alpine`
// will result in the message:
// "Run this image with `docker run alpine`"
func TemplateReplaceArg(i int) string {
return fmt.Sprintf(hookTemplateArg, strconv.Itoa(i))
}
func ParseTemplate(hookTemplate string, cmd *cobra.Command) ([]string, error) {
tmpl := template.New("").Funcs(commandFunctions)
tmpl, err := tmpl.Parse(hookTemplate)
if err != nil {
return nil, err
}
b := bytes.Buffer{}
err = tmpl.Execute(&b, cmd)
if err != nil {
return nil, err
}
return strings.Split(b.String(), "\n"), nil
}
var ErrHookTemplateParse = errors.New("failed to parse hook template")
const (
hookTemplateCommandName = "{{.Name}}"
hookTemplateFlagValue = `{{flag . "%s"}}`
hookTemplateArg = "{{arg . %s}}"
)
var commandFunctions = template.FuncMap{
"flag": getFlagValue,
"arg": getArgValue,
}
func getFlagValue(cmd *cobra.Command, flag string) (string, error) {
cmdFlag := cmd.Flag(flag)
if cmdFlag == nil {
return "", ErrHookTemplateParse
}
return cmdFlag.Value.String(), nil
}
func getArgValue(cmd *cobra.Command, i int) (string, error) {
flags := cmd.Flags()
if flags == nil {
return "", ErrHookTemplateParse
}
return flags.Arg(i), nil
}
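// ParseTemplate usage sketch (hypothetical command and template): given the
// *cobra.Command for `docker run --name my-container alpine`,
//
// ParseTemplate(`you ran {{.Name}} with {{flag . "name"}}`, cmd)
//
// returns []string{"you ran run with my-container"}: {{.Name}} resolves via
// cmd.Name() and {{flag . "name"}} is handled by getFlagValue above.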


@@ -0,0 +1,21 @@
package manager
import "os/exec"
// Candidate represents a possible plugin candidate, for mocking purposes
type Candidate interface {
Path() string
Metadata() ([]byte, error)
}
type candidate struct {
path string
}
func (c *candidate) Path() string {
return c.path
}
func (c *candidate) Metadata() ([]byte, error) {
return exec.Command(c.path, MetadataSubcommandName).Output()
}


@@ -0,0 +1,148 @@
package manager
import (
"fmt"
"net/url"
"os"
"strings"
"sync"
"github.com/docker/cli/cli/command"
"github.com/spf13/cobra"
"go.opentelemetry.io/otel/attribute"
)
const (
// CommandAnnotationPlugin is added to every stub command added by
// AddPluginCommandStubs with the value "true" and so can be
// used to distinguish plugin stubs from regular commands.
CommandAnnotationPlugin = "com.docker.cli.plugin"
// CommandAnnotationPluginVendor is added to every stub command
// added by AddPluginCommandStubs and contains the vendor of
// that plugin.
CommandAnnotationPluginVendor = "com.docker.cli.plugin.vendor"
// CommandAnnotationPluginVersion is added to every stub command
// added by AddPluginCommandStubs and contains the version of
// that plugin.
CommandAnnotationPluginVersion = "com.docker.cli.plugin.version"
// CommandAnnotationPluginInvalid is added to any stub command
// added by AddPluginCommandStubs for an invalid command (that
// is, one which failed its candidate test) and contains the
// reason for the failure.
CommandAnnotationPluginInvalid = "com.docker.cli.plugin-invalid"
// CommandAnnotationPluginCommandPath is added to overwrite the
// command path for a plugin invocation.
CommandAnnotationPluginCommandPath = "com.docker.cli.plugin.command_path"
)
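// For example (illustrative), code walking the command tree can detect a
// plugin stub and read its origin with:
//
// if cmd.Annotations[CommandAnnotationPlugin] == "true" {
//     vendor := cmd.Annotations[CommandAnnotationPluginVendor]
//     _ = vendor // plugin stub added by AddPluginCommandStubs
// }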
var pluginCommandStubsOnce sync.Once
// AddPluginCommandStubs adds a stub cobra.Command for each valid and invalid
// plugin. The command stubs will have several annotations added, see
// `CommandAnnotationPlugin*`.
func AddPluginCommandStubs(dockerCli command.Cli, rootCmd *cobra.Command) (err error) {
pluginCommandStubsOnce.Do(func() {
var plugins []Plugin
plugins, err = ListPlugins(dockerCli, rootCmd)
if err != nil {
return
}
for _, p := range plugins {
p := p
vendor := p.Vendor
if vendor == "" {
vendor = "unknown"
}
annotations := map[string]string{
CommandAnnotationPlugin: "true",
CommandAnnotationPluginVendor: vendor,
CommandAnnotationPluginVersion: p.Version,
}
if p.Err != nil {
annotations[CommandAnnotationPluginInvalid] = p.Err.Error()
}
rootCmd.AddCommand(&cobra.Command{
Use: p.Name,
Short: p.ShortDescription,
Run: func(_ *cobra.Command, _ []string) {},
Annotations: annotations,
DisableFlagParsing: true,
RunE: func(cmd *cobra.Command, args []string) error {
flags := rootCmd.PersistentFlags()
flags.SetOutput(nil)
perr := flags.Parse(args)
if perr != nil {
return perr
}
if flags.Changed("help") {
cmd.HelpFunc()(rootCmd, args)
return nil
}
return fmt.Errorf("docker: '%s' is not a docker command.\nSee 'docker --help'", cmd.Name())
},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
// Delegate completion to plugin
cargs := []string{p.Path, cobra.ShellCompRequestCmd, p.Name}
cargs = append(cargs, args...)
cargs = append(cargs, toComplete)
os.Args = cargs
runCommand, runErr := PluginRunCommand(dockerCli, p.Name, cmd)
if runErr != nil {
return nil, cobra.ShellCompDirectiveError
}
runErr = runCommand.Run()
if runErr == nil {
os.Exit(0) // plugin already rendered complete data
}
return nil, cobra.ShellCompDirectiveError
},
})
}
})
return err
}
const (
dockerCliAttributePrefix = attribute.Key("docker.cli")
cobraCommandPath = attribute.Key("cobra.command_path")
)
func getPluginResourceAttributes(cmd *cobra.Command, plugin Plugin) attribute.Set {
commandPath := cmd.Annotations[CommandAnnotationPluginCommandPath]
if commandPath == "" {
commandPath = fmt.Sprintf("%s %s", cmd.CommandPath(), plugin.Name)
}
attrSet := attribute.NewSet(
cobraCommandPath.String(commandPath),
)
kvs := make([]attribute.KeyValue, 0, attrSet.Len())
for iter := attrSet.Iter(); iter.Next(); {
attr := iter.Attribute()
kvs = append(kvs, attribute.KeyValue{
Key: dockerCliAttributePrefix + "." + attr.Key,
Value: attr.Value,
})
}
return attribute.NewSet(kvs...)
}
func appendPluginResourceAttributesEnvvar(env []string, cmd *cobra.Command, plugin Plugin) []string {
if attrs := getPluginResourceAttributes(cmd, plugin); attrs.Len() > 0 {
// values in environment variables need to be in baggage format;
// the otel/baggage package can be used after updating to v1.22, as the current version encodes incorrectly
attrsSlice := make([]string, attrs.Len())
for iter := attrs.Iter(); iter.Next(); {
i, v := iter.IndexedAttribute()
attrsSlice[i] = string(v.Key) + "=" + url.PathEscape(v.Value.AsString())
}
env = append(env, ResourceAttributesEnvvar+"="+strings.Join(attrsSlice, ","))
}
return env
}
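// Illustrative example (hypothetical invocation): for a plugin run as
// `docker buildx`, getPluginResourceAttributes yields the attribute
// docker.cli.cobra.command_path = "docker buildx", so the function above
// appends an entry like
//
// OTEL_RESOURCE_ATTRIBUTES=docker.cli.cobra.command_path=docker%20buildx
//
// assuming ResourceAttributesEnvvar (defined elsewhere in this package) is
// OTEL_RESOURCE_ATTRIBUTES; values are percent-encoded as required by the
// baggage format.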


@@ -0,0 +1,54 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package manager
import (
"github.com/pkg/errors"
)
// pluginError is set as Plugin.Err by NewPlugin if the plugin
// candidate fails one of the candidate tests. This exists primarily
// to implement encoding.TextMarshaler such that rendering a plugin as JSON
// (e.g. for `docker info -f '{{json .CLIPlugins}}'`) renders the Err
// field as a useful string and not just `{}`. See
// https://github.com/golang/go/issues/10748 for some discussion
// around why the builtin error type doesn't implement this.
type pluginError struct {
cause error
}
// Error satisfies the core error interface for pluginError.
func (e *pluginError) Error() string {
return e.cause.Error()
}
// Cause satisfies the errors.causer interface for pluginError.
func (e *pluginError) Cause() error {
return e.cause
}
// Unwrap provides compatibility for Go 1.13 error chains.
func (e *pluginError) Unwrap() error {
return e.cause
}
// MarshalText marshals the pluginError into a textual form.
func (e *pluginError) MarshalText() (text []byte, err error) {
return []byte(e.cause.Error()), nil
}
// wrapAsPluginError wraps an error in a pluginError with an
// additional message, analogous to errors.Wrapf.
func wrapAsPluginError(err error, msg string) error {
if err == nil {
return nil
}
return &pluginError{cause: errors.Wrap(err, msg)}
}
// NewPluginError creates a new pluginError, analogous to
// errors.Errorf.
func NewPluginError(msg string, args ...any) error {
return &pluginError{cause: errors.Errorf(msg, args...)}
}


@@ -0,0 +1,199 @@
package manager
import (
"context"
"encoding/json"
"strings"
"github.com/docker/cli/cli-plugins/hooks"
"github.com/docker/cli/cli/command"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// HookPluginData is the type representing the information
// that plugins declaring support for hooks get passed when
// being invoked following a CLI command execution.
type HookPluginData struct {
// RootCmd is a string representing the matching hook configuration
// which is currently being invoked. If a hook for `docker context` is
// configured and the user executes `docker context ls`, the plugin will
// be invoked with `context`.
RootCmd string
Flags map[string]string
CommandError string
}
// RunCLICommandHooks is the entrypoint into the hooks execution flow after
// a main CLI command was executed. It calls the hook subcommand for all
// present CLI plugins that declare support for hooks in their metadata and
// parses/prints their responses.
func RunCLICommandHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCommand *cobra.Command, cmdErrorMessage string) {
commandName := strings.TrimPrefix(subCommand.CommandPath(), rootCmd.Name()+" ")
flags := getCommandFlags(subCommand)
runHooks(ctx, dockerCli, rootCmd, subCommand, commandName, flags, cmdErrorMessage)
}
// RunPluginHooks is the entrypoint for the hooks execution flow
// after a plugin command was just executed by the CLI.
func RunPluginHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCommand *cobra.Command, args []string) {
commandName := strings.Join(args, " ")
flags := getNaiveFlags(args)
runHooks(ctx, dockerCli, rootCmd, subCommand, commandName, flags, "")
}
func runHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCommand *cobra.Command, invokedCommand string, flags map[string]string, cmdErrorMessage string) {
nextSteps := invokeAndCollectHooks(ctx, dockerCli, rootCmd, subCommand, invokedCommand, flags, cmdErrorMessage)
hooks.PrintNextSteps(dockerCli.Err(), nextSteps)
}
func invokeAndCollectHooks(ctx context.Context, dockerCli command.Cli, rootCmd, subCmd *cobra.Command, subCmdStr string, flags map[string]string, cmdErrorMessage string) []string {
// check if the context was cancelled before invoking hooks
select {
case <-ctx.Done():
return nil
default:
}
pluginsCfg := dockerCli.ConfigFile().Plugins
if pluginsCfg == nil {
return nil
}
nextSteps := make([]string, 0, len(pluginsCfg))
for pluginName, cfg := range pluginsCfg {
match, ok := pluginMatch(cfg, subCmdStr)
if !ok {
continue
}
p, err := GetPlugin(pluginName, dockerCli, rootCmd)
if err != nil {
continue
}
hookReturn, err := p.RunHook(ctx, HookPluginData{
RootCmd: match,
Flags: flags,
CommandError: cmdErrorMessage,
})
if err != nil {
// skip misbehaving plugins, but don't halt execution
continue
}
var hookMessageData hooks.HookMessage
err = json.Unmarshal(hookReturn, &hookMessageData)
if err != nil {
continue
}
// currently the only hook type
if hookMessageData.Type != hooks.NextSteps {
continue
}
processedHook, err := hooks.ParseTemplate(hookMessageData.Template, subCmd)
if err != nil {
continue
}
var appended bool
nextSteps, appended = appendNextSteps(nextSteps, processedHook)
if !appended {
logrus.Debugf("Plugin %s responded with an empty hook message %q. Ignoring.", pluginName, string(hookReturn))
}
}
return nextSteps
}
// appendNextSteps appends the processed hook output to the nextSteps slice.
// If the processed hook output is empty, it is not appended.
// Empty lines are not stripped if there's at least one non-empty line.
func appendNextSteps(nextSteps []string, processed []string) ([]string, bool) {
empty := true
for _, l := range processed {
if strings.TrimSpace(l) != "" {
empty = false
break
}
}
if empty {
return nextSteps, false
}
return append(nextSteps, processed...), true
}
// pluginMatch takes a plugin configuration and a string representing the
// command being executed (such as 'image ls'; the root 'docker' is omitted)
// and, if the configuration includes a hook for the invoked command, returns
// the configured hook string.
func pluginMatch(pluginCfg map[string]string, subCmd string) (string, bool) {
configuredPluginHooks, ok := pluginCfg["hooks"]
if !ok || configuredPluginHooks == "" {
return "", false
}
commands := strings.Split(configuredPluginHooks, ",")
for _, hookCmd := range commands {
if hookMatch(hookCmd, subCmd) {
return hookCmd, true
}
}
return "", false
}
func hookMatch(hookCmd, subCmd string) bool {
hookCmdTokens := strings.Split(hookCmd, " ")
subCmdTokens := strings.Split(subCmd, " ")
if len(hookCmdTokens) > len(subCmdTokens) {
return false
}
for i, v := range hookCmdTokens {
if v != subCmdTokens[i] {
return false
}
}
return true
}
func getCommandFlags(cmd *cobra.Command) map[string]string {
flags := make(map[string]string)
cmd.Flags().Visit(func(f *pflag.Flag) {
var fValue string
if f.Value.Type() == "bool" {
fValue = f.Value.String()
}
flags[f.Name] = fValue
})
return flags
}
// getNaiveFlags string-matches argv and parses them into a map.
// This is used when calling hooks after a plugin command, since in
// that case we can't rely on the cobra command tree to parse flags.
// No values are ever passed, since we don't have enough information
// to process them.
func getNaiveFlags(args []string) map[string]string {
flags := make(map[string]string)
for _, arg := range args {
if strings.HasPrefix(arg, "--") {
flags[arg[2:]] = ""
continue
}
if strings.HasPrefix(arg, "-") {
flags[arg[1:]] = ""
}
}
return flags
}
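To make the matching rule concrete, the sketch below (illustrative only, not part of the vendored file) mirrors the token-by-token prefix comparison performed by pluginMatch and hookMatch: a hook configured for "image ls" fires for "image ls" but not for "image" or "image inspect", while a hook configured for "context" fires for any "context ..." subcommand.

package main

import (
	"fmt"
	"strings"
)

// matches mirrors hookMatch above: every token of the configured hook command
// must equal the corresponding leading token of the invoked subcommand.
func matches(hookCmd, subCmd string) bool {
	hookTokens := strings.Split(hookCmd, " ")
	subTokens := strings.Split(subCmd, " ")
	if len(hookTokens) > len(subTokens) {
		return false
	}
	for i, tok := range hookTokens {
		if tok != subTokens[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(matches("image ls", "image ls"))      // true
	fmt.Println(matches("image ls", "image inspect")) // false
	fmt.Println(matches("context", "context ls"))     // true
}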

View File

@ -0,0 +1,264 @@
package manager
import (
"context"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/fvbommel/sortorder"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
const (
// ReexecEnvvar is the name of an envvar which is set to the command
// used to originally invoke the docker CLI when executing a
// plugin. Assuming $PATH and $CWD remain unchanged this should allow
// the plugin to re-execute the original CLI.
ReexecEnvvar = "DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"
// ResourceAttributesEnvvar is the name of the envvar that includes additional
// resource attributes for OTEL.
ResourceAttributesEnvvar = "OTEL_RESOURCE_ATTRIBUTES"
)
// errPluginNotFound is the error returned when a plugin could not be found.
type errPluginNotFound string
func (e errPluginNotFound) NotFound() {}
func (e errPluginNotFound) Error() string {
return "Error: No such CLI plugin: " + string(e)
}
type notFound interface{ NotFound() }
// IsNotFound is true if the given error is due to a plugin not being found.
func IsNotFound(err error) bool {
if e, ok := err.(*pluginError); ok {
err = e.Cause()
}
_, ok := err.(notFound)
return ok
}
// getPluginDirs returns the platform-specific locations to search for plugins
// in order of preference.
//
// Plugin-discovery is performed in the following order of preference:
//
// 1. The "cli-plugins" directory inside the CLIs [config.Path] (usually "~/.docker/cli-plugins").
// 2. Additional plugin directories as configured through [ConfigFile.CLIPluginsExtraDirs].
// 3. Platform-specific defaultSystemPluginDirs.
//
// [ConfigFile.CLIPluginsExtraDirs]: https://pkg.go.dev/github.com/docker/cli@v26.1.4+incompatible/cli/config/configfile#ConfigFile.CLIPluginsExtraDirs
func getPluginDirs(cfg *configfile.ConfigFile) ([]string, error) {
var pluginDirs []string
if cfg != nil {
pluginDirs = append(pluginDirs, cfg.CLIPluginsExtraDirs...)
}
pluginDir, err := config.Path("cli-plugins")
if err != nil {
return nil, err
}
pluginDirs = append(pluginDirs, pluginDir)
pluginDirs = append(pluginDirs, defaultSystemPluginDirs...)
return pluginDirs, nil
}
func addPluginCandidatesFromDir(res map[string][]string, d string) error {
dentries, err := os.ReadDir(d)
if err != nil {
return err
}
for _, dentry := range dentries {
switch dentry.Type() & os.ModeType {
case 0, os.ModeSymlink:
// Regular file or symlink, keep going
default:
// Something else, ignore.
continue
}
name := dentry.Name()
if !strings.HasPrefix(name, NamePrefix) {
continue
}
name = strings.TrimPrefix(name, NamePrefix)
var err error
if name, err = trimExeSuffix(name); err != nil {
continue
}
res[name] = append(res[name], filepath.Join(d, dentry.Name()))
}
return nil
}
// listPluginCandidates returns a map from plugin name to the list of (unvalidated) Candidates. The list is in descending order of priority.
func listPluginCandidates(dirs []string) (map[string][]string, error) {
result := make(map[string][]string)
for _, d := range dirs {
// Silently ignore any directories which we cannot
// Stat (e.g. due to permissions or anything else) or
// which are not directories.
if fi, err := os.Stat(d); err != nil || !fi.IsDir() {
continue
}
if err := addPluginCandidatesFromDir(result, d); err != nil {
// Silently ignore paths which don't exist.
if os.IsNotExist(err) {
continue
}
return nil, err // Or return partial result?
}
}
return result, nil
}
// GetPlugin returns a plugin on the system by its name
func GetPlugin(name string, dockerCli command.Cli, rootcmd *cobra.Command) (*Plugin, error) {
pluginDirs, err := getPluginDirs(dockerCli.ConfigFile())
if err != nil {
return nil, err
}
candidates, err := listPluginCandidates(pluginDirs)
if err != nil {
return nil, err
}
if paths, ok := candidates[name]; ok {
if len(paths) == 0 {
return nil, errPluginNotFound(name)
}
c := &candidate{paths[0]}
p, err := newPlugin(c, rootcmd.Commands())
if err != nil {
return nil, err
}
if !IsNotFound(p.Err) {
p.ShadowedPaths = paths[1:]
}
return &p, nil
}
return nil, errPluginNotFound(name)
}
// ListPlugins produces a list of the plugins available on the system
func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) {
pluginDirs, err := getPluginDirs(dockerCli.ConfigFile())
if err != nil {
return nil, err
}
candidates, err := listPluginCandidates(pluginDirs)
if err != nil {
return nil, err
}
var plugins []Plugin
var mu sync.Mutex
eg, _ := errgroup.WithContext(context.TODO())
cmds := rootcmd.Commands()
for _, paths := range candidates {
func(paths []string) {
eg.Go(func() error {
if len(paths) == 0 {
return nil
}
c := &candidate{paths[0]}
p, err := newPlugin(c, cmds)
if err != nil {
return err
}
if !IsNotFound(p.Err) {
p.ShadowedPaths = paths[1:]
mu.Lock()
defer mu.Unlock()
plugins = append(plugins, p)
}
return nil
})
}(paths)
}
if err := eg.Wait(); err != nil {
return nil, err
}
sort.Slice(plugins, func(i, j int) bool {
return sortorder.NaturalLess(plugins[i].Name, plugins[j].Name)
})
return plugins, nil
}
// PluginRunCommand returns an "os/exec".Cmd which when .Run() will execute the named plugin.
// The rootcmd argument is referenced to determine the set of builtin commands in order to detect conficts.
// The error returned satisfies the IsNotFound() predicate if no plugin was found or if the first candidate plugin was invalid somehow.
func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command) (*exec.Cmd, error) {
// This uses the full original args, not the args which may
// have been provided by cobra to our caller. This is because
// they lack e.g. global options which we must propagate here.
args := os.Args[1:]
if !pluginNameRe.MatchString(name) {
// We treat this as "not found" so that callers will
// fallback to their "invalid" command path.
return nil, errPluginNotFound(name)
}
exename := addExeSuffix(NamePrefix + name)
pluginDirs, err := getPluginDirs(dockerCli.ConfigFile())
if err != nil {
return nil, err
}
for _, d := range pluginDirs {
path := filepath.Join(d, exename)
// We stat here rather than letting the exec tell us
// ENOENT because the latter does not distinguish a
// file not existing from its dynamic loader or one of
// its libraries not existing.
if _, err := os.Stat(path); os.IsNotExist(err) {
continue
}
c := &candidate{path: path}
plugin, err := newPlugin(c, rootcmd.Commands())
if err != nil {
return nil, err
}
if plugin.Err != nil {
// TODO: why are we not returning plugin.Err?
return nil, errPluginNotFound(name)
}
cmd := exec.Command(plugin.Path, args...)
// Using dockerCli.{In,Out,Err}() here results in a hang until something is input.
// See: - https://github.com/golang/go/issues/10338
// - https://github.com/golang/go/commit/d000e8742a173aa0659584aa01b7ba2834ba28ab
// os.Stdin is a *os.File which avoids this behaviour. We don't need the functionality
// of the wrappers here anyway.
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = append(cmd.Environ(), ReexecEnvvar+"="+os.Args[0])
cmd.Env = appendPluginResourceAttributesEnvvar(cmd.Env, rootcmd, plugin)
return cmd, nil
}
return nil, errPluginNotFound(name)
}
// IsPluginCommand checks if the given cmd is a plugin-stub.
func IsPluginCommand(cmd *cobra.Command) bool {
return cmd.Annotations[CommandAnnotationPlugin] == "true"
}
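As a concrete illustration of the lookup, the sketch below (not part of the vendored file) approximates what PluginRunCommand does for a plugin named "buildx" on Linux, assuming the default config directory and no cliPluginsExtraDirs: walk the plugin directories in order and run the first "docker-buildx" that exists.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	home, _ := os.UserHomeDir()
	// Assumed search order: the user config dir, then the Linux system defaults
	// (extra dirs from config.json would come first, but none are configured here).
	dirs := []string{
		filepath.Join(home, ".docker", "cli-plugins"),
		"/usr/local/lib/docker/cli-plugins",
		"/usr/local/libexec/docker/cli-plugins",
		"/usr/lib/docker/cli-plugins",
		"/usr/libexec/docker/cli-plugins",
	}
	for _, d := range dirs {
		candidate := filepath.Join(d, "docker-buildx")
		if _, err := os.Stat(candidate); err == nil {
			fmt.Println("would execute:", candidate)
			return
		}
	}
	fmt.Println("Error: No such CLI plugin: buildx")
}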

View File

@ -0,0 +1,20 @@
//go:build !windows
package manager
// defaultSystemPluginDirs are the platform-specific locations to search
// for plugins in order of preference.
//
// Plugin-discovery is performed in the following order of preference:
//
// 1. The "cli-plugins" directory inside the CLIs config-directory (usually "~/.docker/cli-plugins").
// 2. Additional plugin directories as configured through [ConfigFile.CLIPluginsExtraDirs].
// 3. Platform-specific defaultSystemPluginDirs (as defined below).
//
// [ConfigFile.CLIPluginsExtraDirs]: https://pkg.go.dev/github.com/docker/cli@v26.1.4+incompatible/cli/config/configfile#ConfigFile.CLIPluginsExtraDirs
var defaultSystemPluginDirs = []string{
"/usr/local/lib/docker/cli-plugins",
"/usr/local/libexec/docker/cli-plugins",
"/usr/lib/docker/cli-plugins",
"/usr/libexec/docker/cli-plugins",
}

View File

@ -0,0 +1,21 @@
package manager
import (
"os"
"path/filepath"
)
// defaultSystemPluginDirs are the platform-specific locations to search
// for plugins in order of preference.
//
// Plugin-discovery is performed in the following order of preference:
//
// 1. The "cli-plugins" directory inside the CLIs config-directory (usually "~/.docker/cli-plugins").
// 2. Additional plugin directories as configured through [ConfigFile.CLIPluginsExtraDirs].
// 3. Platform-specific defaultSystemPluginDirs (as defined below).
//
// [ConfigFile.CLIPluginsExtraDirs]: https://pkg.go.dev/github.com/docker/cli@v26.1.4+incompatible/cli/config/configfile#ConfigFile.CLIPluginsExtraDirs
var defaultSystemPluginDirs = []string{
filepath.Join(os.Getenv("ProgramData"), "Docker", "cli-plugins"),
filepath.Join(os.Getenv("ProgramFiles"), "Docker", "cli-plugins"),
}

View File

@ -0,0 +1,30 @@
package manager
const (
// NamePrefix is the prefix required on all plugin binary names
NamePrefix = "docker-"
// MetadataSubcommandName is the name of the plugin subcommand
// which must be supported by every plugin and returns the
// plugin metadata.
MetadataSubcommandName = "docker-cli-plugin-metadata"
// HookSubcommandName is the name of the plugin subcommand
// which must be implemented by plugins declaring support
// for hooks in their metadata.
HookSubcommandName = "docker-cli-plugin-hooks"
)
// Metadata provided by the plugin.
type Metadata struct {
// SchemaVersion describes the version of this struct. Mandatory, must be "0.1.0"
SchemaVersion string `json:",omitempty"`
// Vendor is the name of the plugin vendor. Mandatory
Vendor string `json:",omitempty"`
// Version is the optional version of this plugin.
Version string `json:",omitempty"`
// ShortDescription should be suitable for a single line help message.
ShortDescription string `json:",omitempty"`
// URL is a pointer to the plugin's homepage.
URL string `json:",omitempty"`
}
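A plugin reports this struct as JSON when invoked with the docker-cli-plugin-metadata subcommand. A minimal sketch of producing such a response (illustrative only; the vendor, version and URL are made-up values):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/cli/cli-plugins/manager"
)

func main() {
	out, _ := json.Marshal(manager.Metadata{
		SchemaVersion:    "0.1.0", // the only accepted value
		Vendor:           "Example Corp",
		Version:          "v0.0.1",
		ShortDescription: "An example CLI plugin",
		URL:              "https://example.com",
	})
	fmt.Println(string(out))
}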

View File

@ -0,0 +1,124 @@
package manager
import (
"context"
"encoding/json"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var pluginNameRe = regexp.MustCompile("^[a-z][a-z0-9]*$")
// Plugin represents a potential plugin with all its metadata.
type Plugin struct {
Metadata
Name string `json:",omitempty"`
Path string `json:",omitempty"`
// Err is non-nil if the plugin failed one of the candidate tests.
Err error `json:",omitempty"`
// ShadowedPaths contains the paths of any other plugins which this plugin takes precedence over.
ShadowedPaths []string `json:",omitempty"`
}
// newPlugin determines if the given candidate is valid and returns a
// Plugin. If the candidate fails one of the tests then `Plugin.Err`
// is set, and is always a `pluginError`, but the `Plugin` is still
// returned with no error. An error is only returned due to a
// non-recoverable error.
func newPlugin(c Candidate, cmds []*cobra.Command) (Plugin, error) {
path := c.Path()
if path == "" {
return Plugin{}, errors.New("plugin candidate path cannot be empty")
}
// The candidate listing process should have skipped anything
// which would fail here, so these are all real errors.
fullname := filepath.Base(path)
if fullname == "." {
return Plugin{}, errors.Errorf("unable to determine basename of plugin candidate %q", path)
}
var err error
if fullname, err = trimExeSuffix(fullname); err != nil {
return Plugin{}, errors.Wrapf(err, "plugin candidate %q", path)
}
if !strings.HasPrefix(fullname, NamePrefix) {
return Plugin{}, errors.Errorf("plugin candidate %q: does not have %q prefix", path, NamePrefix)
}
p := Plugin{
Name: strings.TrimPrefix(fullname, NamePrefix),
Path: path,
}
// Now apply the candidate tests, so these update p.Err.
if !pluginNameRe.MatchString(p.Name) {
p.Err = NewPluginError("plugin candidate %q did not match %q", p.Name, pluginNameRe.String())
return p, nil
}
for _, cmd := range cmds {
// Ignore conflicts with commands which are
// just plugin stubs (i.e. from a previous
// call to AddPluginCommandStubs).
if IsPluginCommand(cmd) {
continue
}
if cmd.Name() == p.Name {
p.Err = NewPluginError("plugin %q duplicates builtin command", p.Name)
return p, nil
}
if cmd.HasAlias(p.Name) {
p.Err = NewPluginError("plugin %q duplicates an alias of builtin command %q", p.Name, cmd.Name())
return p, nil
}
}
// We are supposed to check for relevant execute permissions here. Instead we rely on an attempt to execute.
meta, err := c.Metadata()
if err != nil {
p.Err = wrapAsPluginError(err, "failed to fetch metadata")
return p, nil
}
if err := json.Unmarshal(meta, &p.Metadata); err != nil {
p.Err = wrapAsPluginError(err, "invalid metadata")
return p, nil
}
if p.Metadata.SchemaVersion != "0.1.0" {
p.Err = NewPluginError("plugin SchemaVersion %q is not valid, must be 0.1.0", p.Metadata.SchemaVersion)
return p, nil
}
if p.Metadata.Vendor == "" {
p.Err = NewPluginError("plugin metadata does not define a vendor")
return p, nil
}
return p, nil
}
// RunHook executes the plugin's hooks command
// and returns its unprocessed output.
func (p *Plugin) RunHook(ctx context.Context, hookData HookPluginData) ([]byte, error) {
hDataBytes, err := json.Marshal(hookData)
if err != nil {
return nil, wrapAsPluginError(err, "failed to marshall hook data")
}
pCmd := exec.CommandContext(ctx, p.Path, p.Name, HookSubcommandName, string(hDataBytes))
pCmd.Env = os.Environ()
pCmd.Env = append(pCmd.Env, ReexecEnvvar+"="+os.Args[0])
hookCmdOutput, err := pCmd.Output()
if err != nil {
return nil, wrapAsPluginError(err, "failed to execute plugin hook subcommand")
}
return hookCmdOutput, nil
}
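On the receiving end of RunHook, a plugin's hook subcommand gets the HookPluginData JSON as its last argument and is expected to print a hooks.HookMessage on stdout. A rough sketch of such a handler (illustrative only; real plugins may use higher-level helpers instead):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/docker/cli/cli-plugins/hooks"
	"github.com/docker/cli/cli-plugins/manager"
)

func main() {
	// argv as built by RunHook: /path/to/docker-example example docker-cli-plugin-hooks '<json>'
	var data manager.HookPluginData
	if err := json.Unmarshal([]byte(os.Args[len(os.Args)-1]), &data); err != nil {
		os.Exit(1)
	}
	resp, _ := json.Marshal(hooks.HookMessage{
		Type:     hooks.NextSteps,
		Template: fmt.Sprintf("You just ran %q. Try 'docker example --help' next.", data.RootCmd),
	})
	fmt.Println(string(resp))
}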

View File

@ -0,0 +1,11 @@
//go:build !windows
package manager
func trimExeSuffix(s string) (string, error) {
return s, nil
}
func addExeSuffix(s string) string {
return s
}

View File

@ -0,0 +1,26 @@
package manager
import (
"path/filepath"
"strings"
"github.com/pkg/errors"
)
// This is made slightly more complex due to needing to be case insensitive.
func trimExeSuffix(s string) (string, error) {
ext := filepath.Ext(s)
if ext == "" {
return "", errors.Errorf("path %q lacks required file extension", s)
}
exe := ".exe"
if !strings.EqualFold(ext, exe) {
return "", errors.Errorf("path %q lacks required %q suffix", s, exe)
}
return strings.TrimSuffix(s, ext), nil
}
func addExeSuffix(s string) string {
return s + ".exe"
}

525
vendor/github.com/docker/cli/cli/cobra.go generated vendored Normal file
View File

@ -0,0 +1,525 @@
package cli
import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
pluginmanager "github.com/docker/cli/cli-plugins/manager"
"github.com/docker/cli/cli/command"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/registry"
"github.com/fvbommel/sortorder"
"github.com/moby/term"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// setupCommonRootCommand contains the setup common to
// SetupRootCommand and SetupPluginRootCommand.
func setupCommonRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *cobra.Command) {
opts := cliflags.NewClientOptions()
opts.InstallFlags(rootCmd.Flags())
cobra.AddTemplateFunc("add", func(a, b int) int { return a + b })
cobra.AddTemplateFunc("hasAliases", hasAliases)
cobra.AddTemplateFunc("hasSubCommands", hasSubCommands)
cobra.AddTemplateFunc("hasTopCommands", hasTopCommands)
cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands)
cobra.AddTemplateFunc("hasSwarmSubCommands", hasSwarmSubCommands)
cobra.AddTemplateFunc("hasInvalidPlugins", hasInvalidPlugins)
cobra.AddTemplateFunc("topCommands", topCommands)
cobra.AddTemplateFunc("commandAliases", commandAliases)
cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
cobra.AddTemplateFunc("orchestratorSubCommands", orchestratorSubCommands)
cobra.AddTemplateFunc("invalidPlugins", invalidPlugins)
cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)
cobra.AddTemplateFunc("vendorAndVersion", vendorAndVersion)
cobra.AddTemplateFunc("invalidPluginReason", invalidPluginReason)
cobra.AddTemplateFunc("isPlugin", isPlugin)
cobra.AddTemplateFunc("isExperimental", isExperimental)
cobra.AddTemplateFunc("hasAdditionalHelp", hasAdditionalHelp)
cobra.AddTemplateFunc("additionalHelp", additionalHelp)
cobra.AddTemplateFunc("decoratedName", decoratedName)
rootCmd.SetUsageTemplate(usageTemplate)
rootCmd.SetHelpTemplate(helpTemplate)
rootCmd.SetFlagErrorFunc(FlagErrorFunc)
rootCmd.SetHelpCommand(helpCommand)
rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "use --help")
rootCmd.PersistentFlags().Lookup("help").Hidden = true
rootCmd.Annotations = map[string]string{
"additionalHelp": "For more help on how to use Docker, head to https://docs.docker.com/go/guides/",
"docs.code-delimiter": `"`, // https://github.com/docker/cli-docs-tool/blob/77abede22166eaea4af7335096bdcedd043f5b19/annotation/annotation.go#L20-L22
}
// Configure registry.CertsDir() when running in rootless-mode
if os.Getenv("ROOTLESSKIT_STATE_DIR") != "" {
if configHome, err := homedir.GetConfigHome(); err == nil {
registry.SetCertsDir(filepath.Join(configHome, "docker/certs.d"))
}
}
return opts, helpCommand
}
// SetupRootCommand sets default usage, help, and error handling for the
// root command.
func SetupRootCommand(rootCmd *cobra.Command) (opts *cliflags.ClientOptions, helpCmd *cobra.Command) {
rootCmd.SetVersionTemplate("Docker version {{.Version}}\n")
return setupCommonRootCommand(rootCmd)
}
// SetupPluginRootCommand sets default usage, help and error handling for a plugin root command.
func SetupPluginRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet) {
opts, _ := setupCommonRootCommand(rootCmd)
return opts, rootCmd.Flags()
}
// FlagErrorFunc prints an error message which matches the format of the
// docker/cli/cli error messages
func FlagErrorFunc(cmd *cobra.Command, err error) error {
if err == nil {
return nil
}
usage := ""
if cmd.HasSubCommands() {
usage = "\n\n" + cmd.UsageString()
}
return StatusError{
Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
StatusCode: 125,
}
}
// TopLevelCommand encapsulates a top-level cobra command (either
// docker CLI or a plugin) and global flag handling logic necessary
// for plugins.
type TopLevelCommand struct {
cmd *cobra.Command
dockerCli *command.DockerCli
opts *cliflags.ClientOptions
flags *pflag.FlagSet
args []string
}
// NewTopLevelCommand returns a new TopLevelCommand object
func NewTopLevelCommand(cmd *cobra.Command, dockerCli *command.DockerCli, opts *cliflags.ClientOptions, flags *pflag.FlagSet) *TopLevelCommand {
return &TopLevelCommand{
cmd: cmd,
dockerCli: dockerCli,
opts: opts,
flags: flags,
args: os.Args[1:],
}
}
// SetArgs sets the args (default: os.Args[1:], as used to invoke the command).
func (tcmd *TopLevelCommand) SetArgs(args []string) {
tcmd.args = args
tcmd.cmd.SetArgs(args)
}
// SetFlag sets a flag in the local flag set of the top-level command
func (tcmd *TopLevelCommand) SetFlag(name, value string) {
tcmd.cmd.Flags().Set(name, value)
}
// HandleGlobalFlags takes care of parsing global flags defined on the
// command, it returns the underlying cobra command and the args it
// will be called with (or an error).
//
// On success the caller is responsible for calling Initialize()
// before calling `Execute` on the returned command.
func (tcmd *TopLevelCommand) HandleGlobalFlags() (*cobra.Command, []string, error) {
cmd := tcmd.cmd
// We manually parse the global arguments and find the
// subcommand in order to properly deal with plugins. We rely
// on the root command never having any non-flag arguments. We
// create our own FlagSet so that we can configure it
// (e.g. `SetInterspersed` below) in an idempotent way.
flags := pflag.NewFlagSet(cmd.Name(), pflag.ContinueOnError)
// We need !interspersed to ensure we stop at the first
// potential command instead of accumulating it into
// flags.Args() and then continuing on and finding other
// arguments which we try and treat as globals (when they are
// actually arguments to the subcommand).
flags.SetInterspersed(false)
// We need the single parse to see both sets of flags.
flags.AddFlagSet(cmd.Flags())
flags.AddFlagSet(cmd.PersistentFlags())
// Now parse the global flags, up to (but not including) the
// first command. The result will be that all the remaining
// arguments are in `flags.Args()`.
if err := flags.Parse(tcmd.args); err != nil {
// Our FlagErrorFunc uses the cli, make sure it is initialized
if err := tcmd.Initialize(); err != nil {
return nil, nil, err
}
return nil, nil, cmd.FlagErrorFunc()(cmd, err)
}
return cmd, flags.Args(), nil
}
// Initialize finalises global option parsing and initializes the docker client.
func (tcmd *TopLevelCommand) Initialize(ops ...command.CLIOption) error {
tcmd.opts.SetDefaultOptions(tcmd.flags)
return tcmd.dockerCli.Initialize(tcmd.opts, ops...)
}
// VisitAll will traverse all commands from the root.
// This is different from the VisitAll of cobra.Command where only parents
// are checked.
func VisitAll(root *cobra.Command, fn func(*cobra.Command)) {
for _, cmd := range root.Commands() {
VisitAll(cmd, fn)
}
fn(root)
}
// DisableFlagsInUseLine sets the DisableFlagsInUseLine flag on all
// commands within the tree rooted at cmd.
func DisableFlagsInUseLine(cmd *cobra.Command) {
VisitAll(cmd, func(ccmd *cobra.Command) {
// do not add a `[flags]` to the end of the usage line.
ccmd.DisableFlagsInUseLine = true
})
}
// HasCompletionArg returns true if a cobra completion arg request is found.
func HasCompletionArg(args []string) bool {
for _, arg := range args {
if arg == cobra.ShellCompRequestCmd || arg == cobra.ShellCompNoDescRequestCmd {
return true
}
}
return false
}
var helpCommand = &cobra.Command{
Use: "help [command]",
Short: "Help about the command",
PersistentPreRun: func(cmd *cobra.Command, args []string) {},
PersistentPostRun: func(cmd *cobra.Command, args []string) {},
RunE: func(c *cobra.Command, args []string) error {
cmd, args, e := c.Root().Find(args)
if cmd == nil || e != nil || len(args) > 0 {
return errors.Errorf("unknown help topic: %v", strings.Join(args, " "))
}
helpFunc := cmd.HelpFunc()
helpFunc(cmd, args)
return nil
},
}
func isExperimental(cmd *cobra.Command) bool {
if _, ok := cmd.Annotations["experimentalCLI"]; ok {
return true
}
var experimental bool
cmd.VisitParents(func(cmd *cobra.Command) {
if _, ok := cmd.Annotations["experimentalCLI"]; ok {
experimental = true
}
})
return experimental
}
func additionalHelp(cmd *cobra.Command) string {
if msg, ok := cmd.Annotations["additionalHelp"]; ok {
out := cmd.OutOrStderr()
if _, isTerminal := term.GetFdInfo(out); !isTerminal {
return msg
}
style := aec.EmptyBuilder.Bold().ANSI
return style.Apply(msg)
}
return ""
}
func hasAdditionalHelp(cmd *cobra.Command) bool {
return additionalHelp(cmd) != ""
}
func isPlugin(cmd *cobra.Command) bool {
return pluginmanager.IsPluginCommand(cmd)
}
func hasAliases(cmd *cobra.Command) bool {
return len(cmd.Aliases) > 0 || cmd.Annotations["aliases"] != ""
}
func hasSubCommands(cmd *cobra.Command) bool {
return len(operationSubCommands(cmd)) > 0
}
func hasManagementSubCommands(cmd *cobra.Command) bool {
return len(managementSubCommands(cmd)) > 0
}
func hasSwarmSubCommands(cmd *cobra.Command) bool {
return len(orchestratorSubCommands(cmd)) > 0
}
func hasInvalidPlugins(cmd *cobra.Command) bool {
return len(invalidPlugins(cmd)) > 0
}
func hasTopCommands(cmd *cobra.Command) bool {
return len(topCommands(cmd)) > 0
}
// commandAliases is a templating function to return aliases for the command,
// formatted as the full command as they're called (contrary to the default
// Aliases function, which only returns the subcommand).
func commandAliases(cmd *cobra.Command) string {
if cmd.Annotations["aliases"] != "" {
return cmd.Annotations["aliases"]
}
var parentPath string
if cmd.HasParent() {
parentPath = cmd.Parent().CommandPath() + " "
}
aliases := cmd.CommandPath()
for _, alias := range cmd.Aliases {
aliases += ", " + parentPath + alias
}
return aliases
}
func topCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
if cmd.Parent() != nil {
// for now, only use top-commands for the root-command, and skip
// for sub-commands
return cmds
}
for _, sub := range cmd.Commands() {
if isPlugin(sub) || !sub.IsAvailableCommand() {
continue
}
if _, ok := sub.Annotations["category-top"]; ok {
cmds = append(cmds, sub)
}
}
sort.SliceStable(cmds, func(i, j int) bool {
return sortorder.NaturalLess(cmds[i].Annotations["category-top"], cmds[j].Annotations["category-top"])
})
return cmds
}
func operationSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range cmd.Commands() {
if isPlugin(sub) {
continue
}
if _, ok := sub.Annotations["category-top"]; ok {
if cmd.Parent() == nil {
// for now, only use top-commands for the root-command
continue
}
}
if sub.IsAvailableCommand() && !sub.HasSubCommands() {
cmds = append(cmds, sub)
}
}
return cmds
}
func wrappedFlagUsages(cmd *cobra.Command) string {
width := 80
if ws, err := term.GetWinsize(0); err == nil {
width = int(ws.Width)
}
return cmd.Flags().FlagUsagesWrapped(width - 1)
}
func decoratedName(cmd *cobra.Command) string {
decoration := " "
if isPlugin(cmd) {
decoration = "*"
}
return cmd.Name() + decoration
}
func vendorAndVersion(cmd *cobra.Command) string {
if vendor, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVendor]; ok && isPlugin(cmd) {
version := ""
if v, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVersion]; ok && v != "" {
version = ", " + v
}
return fmt.Sprintf("(%s%s)", vendor, version)
}
return ""
}
func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range allManagementSubCommands(cmd) {
if _, ok := sub.Annotations["swarm"]; ok {
continue
}
cmds = append(cmds, sub)
}
return cmds
}
func orchestratorSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range allManagementSubCommands(cmd) {
if _, ok := sub.Annotations["swarm"]; ok {
cmds = append(cmds, sub)
}
}
return cmds
}
func allManagementSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range cmd.Commands() {
if isPlugin(sub) {
if invalidPluginReason(sub) == "" {
cmds = append(cmds, sub)
}
continue
}
if sub.IsAvailableCommand() && sub.HasSubCommands() {
cmds = append(cmds, sub)
}
}
return cmds
}
func invalidPlugins(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range cmd.Commands() {
if !isPlugin(sub) {
continue
}
if invalidPluginReason(sub) != "" {
cmds = append(cmds, sub)
}
}
return cmds
}
func invalidPluginReason(cmd *cobra.Command) string {
return cmd.Annotations[pluginmanager.CommandAnnotationPluginInvalid]
}
const usageTemplate = `Usage:
{{- if not .HasSubCommands}} {{.UseLine}}{{end}}
{{- if .HasSubCommands}} {{ .CommandPath}}{{- if .HasAvailableFlags}} [OPTIONS]{{end}} COMMAND{{end}}
{{if ne .Long ""}}{{ .Long | trim }}{{ else }}{{ .Short | trim }}{{end}}
{{- if isExperimental .}}
EXPERIMENTAL:
{{.CommandPath}} is an experimental feature.
Experimental features provide early access to product functionality. These
features may change between releases without warning, or can be removed from a
future release. Learn more about experimental features in our documentation:
https://docs.docker.com/go/experimental/
{{- end}}
{{- if hasAliases . }}
Aliases:
{{ commandAliases . }}
{{- end}}
{{- if .HasExample}}
Examples:
{{ .Example }}
{{- end}}
{{- if .HasParent}}
{{- if .HasAvailableFlags}}
Options:
{{ wrappedFlagUsages . | trimRightSpace}}
{{- end}}
{{- end}}
{{- if hasTopCommands .}}
Common Commands:
{{- range topCommands .}}
{{rpad (decoratedName .) (add .NamePadding 1)}}{{.Short}}
{{- end}}
{{- end}}
{{- if hasManagementSubCommands . }}
Management Commands:
{{- range managementSubCommands . }}
{{rpad (decoratedName .) (add .NamePadding 1)}}{{.Short}}
{{- end}}
{{- end}}
{{- if hasSwarmSubCommands . }}
Swarm Commands:
{{- range orchestratorSubCommands . }}
{{rpad (decoratedName .) (add .NamePadding 1)}}{{.Short}}
{{- end}}
{{- end}}
{{- if hasSubCommands .}}
Commands:
{{- range operationSubCommands . }}
{{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}
{{- if hasInvalidPlugins . }}
Invalid Plugins:
{{- range invalidPlugins . }}
{{rpad .Name .NamePadding }} {{invalidPluginReason .}}
{{- end}}
{{- end}}
{{- if not .HasParent}}
{{- if .HasAvailableFlags}}
Global Options:
{{ wrappedFlagUsages . | trimRightSpace}}
{{- end}}
{{- end}}
{{- if .HasSubCommands }}
Run '{{.CommandPath}} COMMAND --help' for more information on a command.
{{- end}}
{{- if hasAdditionalHelp .}}
{{ additionalHelp . }}
{{- end}}
`
const helpTemplate = `
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
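These pieces are typically wired together by a CLI entrypoint: build the root command, let TopLevelCommand parse the global flags, initialize the client, and only then execute. A reduced sketch of that sequence (illustrative only; the Use string, exit codes and the absence of subcommands are arbitrary simplifications):

package main

import (
	"os"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)

func main() {
	dockerCli, err := command.NewDockerCli()
	if err != nil {
		os.Exit(1)
	}
	rootCmd := &cobra.Command{Use: "docker [OPTIONS] COMMAND [ARG...]", SilenceUsage: true}
	opts, _ := cli.SetupRootCommand(rootCmd)
	tcmd := cli.NewTopLevelCommand(rootCmd, dockerCli, opts, rootCmd.Flags())

	// Parse global flags first; Initialize must run before Execute, as
	// documented on HandleGlobalFlags.
	cmd, args, err := tcmd.HandleGlobalFlags()
	if err != nil {
		os.Exit(125)
	}
	if err := tcmd.Initialize(); err != nil {
		os.Exit(1)
	}
	cmd.SetArgs(args)
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}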

593
vendor/github.com/docker/cli/cli/command/cli.go generated vendored Normal file
View File

@ -0,0 +1,593 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package command
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"sync"
"time"
"github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
dcontext "github.com/docker/cli/cli/context"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/store"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
manifeststore "github.com/docker/cli/cli/manifest/store"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/streams"
"github.com/docker/cli/cli/trust"
"github.com/docker/cli/cli/version"
dopts "github.com/docker/cli/opts"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"github.com/spf13/cobra"
notaryclient "github.com/theupdateframework/notary/client"
)
const defaultInitTimeout = 2 * time.Second
// Streams is an interface which exposes the standard input and output streams
type Streams interface {
In() *streams.In
Out() *streams.Out
Err() *streams.Out
}
// Cli represents the docker command line client.
type Cli interface {
Client() client.APIClient
Streams
SetIn(in *streams.In)
Apply(ops ...CLIOption) error
ConfigFile() *configfile.ConfigFile
ServerInfo() ServerInfo
NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
DefaultVersion() string
CurrentVersion() string
ManifestStore() manifeststore.Store
RegistryClient(bool) registryclient.RegistryClient
ContentTrustEnabled() bool
BuildKitEnabled() (bool, error)
ContextStore() store.Store
CurrentContext() string
DockerEndpoint() docker.Endpoint
TelemetryClient
}
// DockerCli is an instance of the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
configFile *configfile.ConfigFile
options *cliflags.ClientOptions
in *streams.In
out *streams.Out
err *streams.Out
client client.APIClient
serverInfo ServerInfo
contentTrust bool
contextStore store.Store
currentContext string
init sync.Once
initErr error
dockerEndpoint docker.Endpoint
contextStoreConfig store.Config
initTimeout time.Duration
res telemetryResource
// baseCtx is the base context used for internal operations. In the future
// this may be replaced by explicitly passing a context to functions that
// need it.
baseCtx context.Context
enableGlobalMeter, enableGlobalTracer bool
}
// DefaultVersion returns api.DefaultVersion.
func (cli *DockerCli) DefaultVersion() string {
return api.DefaultVersion
}
// CurrentVersion returns the API version currently negotiated, or the default
// version otherwise.
func (cli *DockerCli) CurrentVersion() string {
_ = cli.initialize()
if cli.client == nil {
return api.DefaultVersion
}
return cli.client.ClientVersion()
}
// Client returns the APIClient
func (cli *DockerCli) Client() client.APIClient {
if err := cli.initialize(); err != nil {
_, _ = fmt.Fprintf(cli.Err(), "Failed to initialize: %s\n", err)
os.Exit(1)
}
return cli.client
}
// Out returns the writer used for stdout
func (cli *DockerCli) Out() *streams.Out {
return cli.out
}
// Err returns the writer used for stderr
func (cli *DockerCli) Err() *streams.Out {
return cli.err
}
// SetIn sets the reader used for stdin
func (cli *DockerCli) SetIn(in *streams.In) {
cli.in = in
}
// In returns the reader used for stdin
func (cli *DockerCli) In() *streams.In {
return cli.in
}
// ShowHelp shows the command help.
func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
cmd.SetOut(err)
cmd.HelpFunc()(cmd, args)
return nil
}
}
// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
// TODO(thaJeztah): when would this happen? Is this only in tests (where cli.Initialize() is not called first?)
if cli.configFile == nil {
cli.configFile = config.LoadDefaultConfigFile(cli.err)
}
return cli.configFile
}
// ServerInfo returns the server version details for the host this client is
// connected to
func (cli *DockerCli) ServerInfo() ServerInfo {
_ = cli.initialize()
return cli.serverInfo
}
// ContentTrustEnabled returns whether content trust has been enabled by an
// environment variable.
func (cli *DockerCli) ContentTrustEnabled() bool {
return cli.contentTrust
}
// BuildKitEnabled returns whether BuildKit is enabled.
func (cli *DockerCli) BuildKitEnabled() (bool, error) {
// use DOCKER_BUILDKIT env var value if set and not empty
if v := os.Getenv("DOCKER_BUILDKIT"); v != "" {
enabled, err := strconv.ParseBool(v)
if err != nil {
return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
}
return enabled, nil
}
// if a builder alias is defined, we are using BuildKit
aliasMap := cli.ConfigFile().Aliases
if _, ok := aliasMap["builder"]; ok {
return true, nil
}
si := cli.ServerInfo()
if si.BuildkitVersion == types.BuilderBuildKit {
// The daemon advertised BuildKit as the preferred builder; this may
// be either a Linux daemon or a Windows daemon with experimental
// BuildKit support enabled.
return true, nil
}
// otherwise, assume BuildKit is enabled for Linux, but disabled for
// Windows / WCOW, which does not yet support BuildKit by default.
return si.OSType != "windows", nil
}
// HooksEnabled returns whether plugin hooks are enabled.
func (cli *DockerCli) HooksEnabled() bool {
// legacy: support the DOCKER_CLI_HINTS env var
if v := os.Getenv("DOCKER_CLI_HINTS"); v != "" {
enabled, err := strconv.ParseBool(v)
if err != nil {
return false
}
return enabled
}
// use DOCKER_CLI_HOOKS env var value if set and not empty
if v := os.Getenv("DOCKER_CLI_HOOKS"); v != "" {
enabled, err := strconv.ParseBool(v)
if err != nil {
return false
}
return enabled
}
featuresMap := cli.ConfigFile().Features
if v, ok := featuresMap["hooks"]; ok {
enabled, err := strconv.ParseBool(v)
if err != nil {
return false
}
return enabled
}
// default to false
return false
}
// ManifestStore returns a store for local manifests
func (cli *DockerCli) ManifestStore() manifeststore.Store {
// TODO: support override default location from config file
return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests"))
}
// RegistryClient returns a client for communicating with a Docker distribution
// registry
func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient {
resolver := func(ctx context.Context, index *registry.IndexInfo) registry.AuthConfig {
return ResolveAuthConfig(cli.ConfigFile(), index)
}
return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure)
}
// WithInitializeClient is passed to DockerCli.Initialize by callers who wish to set a particular API Client for use by the CLI.
func WithInitializeClient(makeClient func(dockerCli *DockerCli) (client.APIClient, error)) CLIOption {
return func(dockerCli *DockerCli) error {
var err error
dockerCli.client, err = makeClient(dockerCli)
return err
}
}
// Initialize runs the dockerCli initialization that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption) error {
for _, o := range ops {
if err := o(cli); err != nil {
return err
}
}
cliflags.SetLogLevel(opts.LogLevel)
if opts.ConfigDir != "" {
config.SetDir(opts.ConfigDir)
}
if opts.Debug {
debug.Enable()
}
if opts.Context != "" && len(opts.Hosts) > 0 {
return errors.New("conflicting options: either specify --host or --context, not both")
}
cli.options = opts
cli.configFile = config.LoadDefaultConfigFile(cli.err)
cli.currentContext = resolveContextName(cli.options, cli.configFile)
cli.contextStore = &ContextStoreWithDefault{
Store: store.New(config.ContextStoreDir(), cli.contextStoreConfig),
Resolver: func() (*DefaultContext, error) {
return ResolveDefaultContext(cli.options, cli.contextStoreConfig)
},
}
// TODO(krissetto): pass ctx to the funcs instead of using this
if cli.enableGlobalMeter {
cli.createGlobalMeterProvider(cli.baseCtx)
}
if cli.enableGlobalTracer {
cli.createGlobalTracerProvider(cli.baseCtx)
}
return nil
}
// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.ClientOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
if opts.Context != "" && len(opts.Hosts) > 0 {
return nil, errors.New("conflicting options: either specify --host or --context, not both")
}
storeConfig := DefaultContextStoreConfig()
contextStore := &ContextStoreWithDefault{
Store: store.New(config.ContextStoreDir(), storeConfig),
Resolver: func() (*DefaultContext, error) {
return ResolveDefaultContext(opts, storeConfig)
},
}
endpoint, err := resolveDockerEndpoint(contextStore, resolveContextName(opts, configFile))
if err != nil {
return nil, errors.Wrap(err, "unable to resolve docker endpoint")
}
return newAPIClientFromEndpoint(endpoint, configFile)
}
func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigFile) (client.APIClient, error) {
opts, err := ep.ClientOpts()
if err != nil {
return nil, err
}
if len(configFile.HTTPHeaders) > 0 {
opts = append(opts, client.WithHTTPHeaders(configFile.HTTPHeaders))
}
opts = append(opts, client.WithUserAgent(UserAgent()))
return client.NewClientWithOpts(opts...)
}
func resolveDockerEndpoint(s store.Reader, contextName string) (docker.Endpoint, error) {
if s == nil {
return docker.Endpoint{}, errors.New("no context store initialized")
}
ctxMeta, err := s.GetMetadata(contextName)
if err != nil {
return docker.Endpoint{}, err
}
epMeta, err := docker.EndpointFromContext(ctxMeta)
if err != nil {
return docker.Endpoint{}, err
}
return docker.WithTLSData(s, contextName, epMeta)
}
// Resolve the Docker endpoint for the default context (based on config, env vars and CLI flags)
func resolveDefaultDockerEndpoint(opts *cliflags.ClientOptions) (docker.Endpoint, error) {
host, err := getServerHost(opts.Hosts, opts.TLSOptions)
if err != nil {
return docker.Endpoint{}, err
}
var (
skipTLSVerify bool
tlsData *dcontext.TLSData
)
if opts.TLSOptions != nil {
skipTLSVerify = opts.TLSOptions.InsecureSkipVerify
tlsData, err = dcontext.TLSDataFromFiles(opts.TLSOptions.CAFile, opts.TLSOptions.CertFile, opts.TLSOptions.KeyFile)
if err != nil {
return docker.Endpoint{}, err
}
}
return docker.Endpoint{
EndpointMeta: docker.EndpointMeta{
Host: host,
SkipTLSVerify: skipTLSVerify,
},
TLSData: tlsData,
}, nil
}
func (cli *DockerCli) getInitTimeout() time.Duration {
if cli.initTimeout != 0 {
return cli.initTimeout
}
return defaultInitTimeout
}
func (cli *DockerCli) initializeFromClient() {
ctx, cancel := context.WithTimeout(cli.baseCtx, cli.getInitTimeout())
defer cancel()
ping, err := cli.client.Ping(ctx)
if err != nil {
// Default to true if we fail to connect to daemon
cli.serverInfo = ServerInfo{HasExperimental: true}
if ping.APIVersion != "" {
cli.client.NegotiateAPIVersionPing(ping)
}
return
}
cli.serverInfo = ServerInfo{
HasExperimental: ping.Experimental,
OSType: ping.OSType,
BuildkitVersion: ping.BuilderVersion,
SwarmStatus: ping.SwarmStatus,
}
cli.client.NegotiateAPIVersionPing(ping)
}
// NotaryClient provides a Notary Repository to interact with signed metadata for an image
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}
// ContextStore returns the ContextStore
func (cli *DockerCli) ContextStore() store.Store {
return cli.contextStore
}
// CurrentContext returns the current context name, based on flags,
// environment variables and the cli configuration file, in the following
// order of preference:
//
// 1. The "--context" command-line option.
// 2. The "DOCKER_CONTEXT" environment variable ([EnvOverrideContext]).
// 3. The current context, as configured through the "currentContext"
// field in the CLI configuration file ("~/.docker/config.json").
// 4. If no context is configured, use the "default" context.
//
// # Fallbacks for backward-compatibility
//
// To preserve backward-compatibility with the "pre-contexts" behavior,
// the "default" context is used if:
//
// - The "--host" option is set
// - The "DOCKER_HOST" ([client.EnvOverrideHost]) environment variable is set
// to a non-empty value.
//
// In these cases, the default context is used, which uses the host as
// specified in "DOCKER_HOST", and TLS config from flags/env vars.
//
// Setting both the "--context" and "--host" flags is ambiguous and results
// in an error when the cli is started.
//
// CurrentContext does not validate if the given context exists or if it's
// valid; errors may occur when trying to use it.
func (cli *DockerCli) CurrentContext() string {
return cli.currentContext
}
// resolveContextName resolves the current context name, based on flags,
// environment variables and the cli configuration file. It does not
// validate if the given context exists or if it's valid; errors may
// occur when trying to use it.
//
// Refer to [DockerCli.CurrentContext] above for further details.
func resolveContextName(opts *cliflags.ClientOptions, cfg *configfile.ConfigFile) string {
if opts != nil && opts.Context != "" {
return opts.Context
}
if opts != nil && len(opts.Hosts) > 0 {
return DefaultContextName
}
if os.Getenv(client.EnvOverrideHost) != "" {
return DefaultContextName
}
if ctxName := os.Getenv(EnvOverrideContext); ctxName != "" {
return ctxName
}
if cfg != nil && cfg.CurrentContext != "" {
// We don't validate if this context exists: errors may occur when trying to use it.
return cfg.CurrentContext
}
return DefaultContextName
}
// DockerEndpoint returns the current docker endpoint
func (cli *DockerCli) DockerEndpoint() docker.Endpoint {
if err := cli.initialize(); err != nil {
// Note that we're not terminating here, as this function may be used
// in cases where we're able to continue.
_, _ = fmt.Fprintf(cli.Err(), "%v\n", cli.initErr)
}
return cli.dockerEndpoint
}
func (cli *DockerCli) getDockerEndPoint() (ep docker.Endpoint, err error) {
cn := cli.CurrentContext()
if cn == DefaultContextName {
return resolveDefaultDockerEndpoint(cli.options)
}
return resolveDockerEndpoint(cli.contextStore, cn)
}
func (cli *DockerCli) initialize() error {
cli.init.Do(func() {
cli.dockerEndpoint, cli.initErr = cli.getDockerEndPoint()
if cli.initErr != nil {
cli.initErr = errors.Wrap(cli.initErr, "unable to resolve docker endpoint")
return
}
if cli.client == nil {
if cli.client, cli.initErr = newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile); cli.initErr != nil {
return
}
}
if cli.baseCtx == nil {
cli.baseCtx = context.Background()
}
cli.initializeFromClient()
})
return cli.initErr
}
// Apply all the operation on the cli
func (cli *DockerCli) Apply(ops ...CLIOption) error {
for _, op := range ops {
if err := op(cli); err != nil {
return err
}
}
return nil
}
// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {
HasExperimental bool
OSType string
BuildkitVersion types.BuilderVersion
// SwarmStatus provides information about the current swarm status of the
// engine, obtained from the "Swarm" header in the API response.
//
// It can be a nil struct if the API version does not provide this header
// in the ping response, or if an error occurred, in which case the client
// should use other ways to get the current swarm status, such as the /swarm
// endpoint.
SwarmStatus *swarm.Status
}
// NewDockerCli returns a DockerCli instance with all operators applied on it.
// It applies by default the standard streams, and the content trust from
// environment.
func NewDockerCli(ops ...CLIOption) (*DockerCli, error) {
defaultOps := []CLIOption{
WithContentTrustFromEnv(),
WithDefaultContextStoreConfig(),
WithStandardStreams(),
}
ops = append(defaultOps, ops...)
cli := &DockerCli{baseCtx: context.Background()}
if err := cli.Apply(ops...); err != nil {
return nil, err
}
return cli, nil
}
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
var host string
switch len(hosts) {
case 0:
host = os.Getenv(client.EnvOverrideHost)
case 1:
host = hosts[0]
default:
return "", errors.New("Specify only one -H")
}
return dopts.ParseHost(tlsOptions != nil, host)
}
// UserAgent returns the user agent string used for making API requests
func UserAgent() string {
return "Docker-Client/" + version.Version + " (" + runtime.GOOS + ")"
}
var defaultStoreEndpoints = []store.NamedTypeGetter{
store.EndpointTypeGetter(docker.DockerEndpoint, func() any { return &docker.EndpointMeta{} }),
}
// RegisterDefaultStoreEndpoints registers a new named endpoint
// metadata type with the default context store config, so that
// endpoint will be supported by stores using the config returned by
// DefaultContextStoreConfig.
func RegisterDefaultStoreEndpoints(ep ...store.NamedTypeGetter) {
defaultStoreEndpoints = append(defaultStoreEndpoints, ep...)
}
// DefaultContextStoreConfig returns a new store.Config with the default set of endpoints configured.
func DefaultContextStoreConfig() store.Config {
return store.NewConfig(
func() any { return &DockerContext{} },
defaultStoreEndpoints...,
)
}
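The precedence documented on CurrentContext above can be restated as a small standalone sketch (illustrative only; options and pickContext are made-up stand-ins for the parsed global flags and the unexported resolveContextName):

package main

import (
	"fmt"
	"os"
)

type options struct {
	Context string
	Hosts   []string
}

// pickContext mirrors the order implemented by resolveContextName above.
func pickContext(opts options, cfgCurrentContext string) string {
	switch {
	case opts.Context != "": // 1. the --context flag
		return opts.Context
	case len(opts.Hosts) > 0: // backward-compat: --host forces the default context
		return "default"
	case os.Getenv("DOCKER_HOST") != "": // backward-compat: DOCKER_HOST forces the default context
		return "default"
	case os.Getenv("DOCKER_CONTEXT") != "": // 2. the DOCKER_CONTEXT env var
		return os.Getenv("DOCKER_CONTEXT")
	case cfgCurrentContext != "": // 3. "currentContext" from the CLI config file
		return cfgCurrentContext
	default: // 4. fall back to the default context
		return "default"
	}
}

func main() {
	fmt.Println(pickContext(options{Context: "remote"}, "work")) // remote
	fmt.Println(pickContext(options{}, "work"))                  // work (unless DOCKER_HOST/DOCKER_CONTEXT are set)
}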

110
vendor/github.com/docker/cli/cli/command/cli_options.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
package command
import (
"context"
"io"
"os"
"strconv"
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/client"
"github.com/moby/term"
)
// CLIOption is a functional argument to apply options to a [DockerCli]. These
// options can be passed to [NewDockerCli] to initialize a new CLI, or
// applied with [DockerCli.Initialize] or [DockerCli.Apply].
type CLIOption func(cli *DockerCli) error
// WithStandardStreams sets a cli in, out and err streams with the standard streams.
func WithStandardStreams() CLIOption {
return func(cli *DockerCli) error {
// Set terminal emulation based on platform as required.
stdin, stdout, stderr := term.StdStreams()
cli.in = streams.NewIn(stdin)
cli.out = streams.NewOut(stdout)
cli.err = streams.NewOut(stderr)
return nil
}
}
// WithBaseContext sets the base context of a cli. It is used to propagate
// the context from the command line to the client.
func WithBaseContext(ctx context.Context) CLIOption {
return func(cli *DockerCli) error {
cli.baseCtx = ctx
return nil
}
}
// WithCombinedStreams uses the same stream for the output and error streams.
func WithCombinedStreams(combined io.Writer) CLIOption {
return func(cli *DockerCli) error {
s := streams.NewOut(combined)
cli.out = s
cli.err = s
return nil
}
}
// WithInputStream sets a cli input stream.
func WithInputStream(in io.ReadCloser) CLIOption {
return func(cli *DockerCli) error {
cli.in = streams.NewIn(in)
return nil
}
}
// WithOutputStream sets a cli output stream.
func WithOutputStream(out io.Writer) CLIOption {
return func(cli *DockerCli) error {
cli.out = streams.NewOut(out)
return nil
}
}
// WithErrorStream sets a cli error stream.
func WithErrorStream(err io.Writer) CLIOption {
return func(cli *DockerCli) error {
cli.err = streams.NewOut(err)
return nil
}
}
// WithContentTrustFromEnv enables content trust on a cli based on the DOCKER_CONTENT_TRUST environment variable value.
func WithContentTrustFromEnv() CLIOption {
return func(cli *DockerCli) error {
cli.contentTrust = false
if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" {
if t, err := strconv.ParseBool(e); t || err != nil {
// treat any other value as true
cli.contentTrust = true
}
}
return nil
}
}
// WithContentTrust enables content trust on a cli.
func WithContentTrust(enabled bool) CLIOption {
return func(cli *DockerCli) error {
cli.contentTrust = enabled
return nil
}
}
// WithDefaultContextStoreConfig configures the cli to use the default context store configuration.
func WithDefaultContextStoreConfig() CLIOption {
return func(cli *DockerCli) error {
cli.contextStoreConfig = DefaultContextStoreConfig()
return nil
}
}
// WithAPIClient configures the cli to use the given API client.
func WithAPIClient(c client.APIClient) CLIOption {
return func(cli *DockerCli) error {
cli.client = c
return nil
}
}
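These options compose; for example, a test can capture all CLI output in a buffer, provide an empty stdin and disable content trust. A minimal sketch (illustrative only, not part of the vendored file):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/docker/cli/cli/command"
)

func main() {
	var out bytes.Buffer
	dockerCli, err := command.NewDockerCli(
		// Later options override the standard streams applied by default.
		command.WithCombinedStreams(&out),
		command.WithInputStream(io.NopCloser(strings.NewReader(""))),
		command.WithContentTrust(false),
	)
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(dockerCli.Out(), "hello")
	fmt.Print(out.String()) // hello
}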

68
vendor/github.com/docker/cli/cli/command/context.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package command
import (
"encoding/json"
"errors"
"github.com/docker/cli/cli/context/store"
)
// DockerContext is a typed representation of what we put in Context metadata
type DockerContext struct {
Description string
AdditionalFields map[string]any
}
// MarshalJSON implements custom JSON marshalling
func (dc DockerContext) MarshalJSON() ([]byte, error) {
s := map[string]any{}
if dc.Description != "" {
s["Description"] = dc.Description
}
if dc.AdditionalFields != nil {
for k, v := range dc.AdditionalFields {
s[k] = v
}
}
return json.Marshal(s)
}
// UnmarshalJSON implements custom JSON unmarshalling
func (dc *DockerContext) UnmarshalJSON(payload []byte) error {
var data map[string]any
if err := json.Unmarshal(payload, &data); err != nil {
return err
}
for k, v := range data {
switch k {
case "Description":
dc.Description = v.(string)
default:
if dc.AdditionalFields == nil {
dc.AdditionalFields = make(map[string]any)
}
dc.AdditionalFields[k] = v
}
}
return nil
}
// GetDockerContext extracts metadata from stored context metadata
func GetDockerContext(storeMetadata store.Metadata) (DockerContext, error) {
if storeMetadata.Metadata == nil {
// can happen if we save endpoints before assigning a context metadata
// it is totally valid, and we should return a default initialized value
return DockerContext{}, nil
}
res, ok := storeMetadata.Metadata.(DockerContext)
if !ok {
return DockerContext{}, errors.New("context metadata is not a valid DockerContext")
}
if storeMetadata.Name == DefaultContextName {
res.Description = "Current DOCKER_HOST based configuration"
}
return res, nil
}
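The custom (un)marshalling above flattens AdditionalFields into the top-level JSON object and restores them on the way back. A minimal round-trip sketch (illustrative only; the field values are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/cli/cli/command"
)

func main() {
	dc := command.DockerContext{
		Description:      "my remote engine",
		AdditionalFields: map[string]any{"Team": "infra"},
	}
	raw, _ := json.Marshal(dc)
	fmt.Println(string(raw)) // {"Description":"my remote engine","Team":"infra"}

	var back command.DockerContext
	_ = json.Unmarshal(raw, &back)
	fmt.Println(back.AdditionalFields["Team"]) // infra
}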

View File

@ -0,0 +1,202 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package command
import (
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/store"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
)
const (
// DefaultContextName is the name reserved for the default context (config & env based)
DefaultContextName = "default"
// EnvOverrideContext is the name of the environment variable that can be
// used to override the context to use. If set, it overrides the context
// that's set in the CLI's configuration file, but takes no effect if the
// "DOCKER_HOST" env-var is set (which takes precedence.
EnvOverrideContext = "DOCKER_CONTEXT"
)
// DefaultContext contains the default context data for all endpoints
type DefaultContext struct {
Meta store.Metadata
TLS store.ContextTLSData
}
// DefaultContextResolver is a function which resolves the default context based on the configuration and the env variables
type DefaultContextResolver func() (*DefaultContext, error)
// ContextStoreWithDefault implements the store.Store interface with a support for the default context
type ContextStoreWithDefault struct {
store.Store
Resolver DefaultContextResolver
}
// EndpointDefaultResolver is implemented by any EndpointMeta object
// which wants to be able to populate the store with whatever its default is.
type EndpointDefaultResolver interface {
// ResolveDefault returns values suitable for storing in store.Metadata.Endpoints
// and store.ContextTLSData.Endpoints.
//
// An error is only returned for something fatal, not simply
// the lack of a default (e.g. because the config file which
// would contain it is missing). If there is no default, it
// returns nil, nil, nil.
//
//nolint:dupword // ignore "Duplicate words (nil,) found"
ResolveDefault() (any, *store.EndpointTLSData, error)
}
// ResolveDefaultContext creates a DefaultContext from the current CLI invocation parameters
func ResolveDefaultContext(opts *cliflags.ClientOptions, config store.Config) (*DefaultContext, error) {
contextTLSData := store.ContextTLSData{
Endpoints: make(map[string]store.EndpointTLSData),
}
contextMetadata := store.Metadata{
Endpoints: make(map[string]any),
Metadata: DockerContext{
Description: "",
},
Name: DefaultContextName,
}
dockerEP, err := resolveDefaultDockerEndpoint(opts)
if err != nil {
return nil, err
}
contextMetadata.Endpoints[docker.DockerEndpoint] = dockerEP.EndpointMeta
if dockerEP.TLSData != nil {
contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData()
}
if err := config.ForeachEndpointType(func(n string, get store.TypeGetter) error {
if n == docker.DockerEndpoint { // handled above
return nil
}
ep := get()
if i, ok := ep.(EndpointDefaultResolver); ok {
meta, tls, err := i.ResolveDefault()
if err != nil {
return err
}
if meta == nil {
return nil
}
contextMetadata.Endpoints[n] = meta
if tls != nil {
contextTLSData.Endpoints[n] = *tls
}
}
// Nothing to be done
return nil
}); err != nil {
return nil, err
}
return &DefaultContext{Meta: contextMetadata, TLS: contextTLSData}, nil
}
// List implements store.Store's List
func (s *ContextStoreWithDefault) List() ([]store.Metadata, error) {
contextList, err := s.Store.List()
if err != nil {
return nil, err
}
defaultContext, err := s.Resolver()
if err != nil {
return nil, err
}
return append(contextList, defaultContext.Meta), nil
}
// CreateOrUpdate is not allowed for the default context and fails
func (s *ContextStoreWithDefault) CreateOrUpdate(meta store.Metadata) error {
if meta.Name == DefaultContextName {
return errdefs.InvalidParameter(errors.New("default context cannot be created nor updated"))
}
return s.Store.CreateOrUpdate(meta)
}
// Remove is not allowed for the default context and fails
func (s *ContextStoreWithDefault) Remove(name string) error {
if name == DefaultContextName {
return errdefs.InvalidParameter(errors.New("default context cannot be removed"))
}
return s.Store.Remove(name)
}
// GetMetadata implements store.Store's GetMetadata
func (s *ContextStoreWithDefault) GetMetadata(name string) (store.Metadata, error) {
if name == DefaultContextName {
defaultContext, err := s.Resolver()
if err != nil {
return store.Metadata{}, err
}
return defaultContext.Meta, nil
}
return s.Store.GetMetadata(name)
}
// ResetTLSMaterial is not implemented for the default context and fails
func (s *ContextStoreWithDefault) ResetTLSMaterial(name string, data *store.ContextTLSData) error {
if name == DefaultContextName {
return errdefs.InvalidParameter(errors.New("default context cannot be edited"))
}
return s.Store.ResetTLSMaterial(name, data)
}
// ResetEndpointTLSMaterial is not implemented for the default context and fails
func (s *ContextStoreWithDefault) ResetEndpointTLSMaterial(contextName string, endpointName string, data *store.EndpointTLSData) error {
if contextName == DefaultContextName {
return errdefs.InvalidParameter(errors.New("default context cannot be edited"))
}
return s.Store.ResetEndpointTLSMaterial(contextName, endpointName, data)
}
// ListTLSFiles implements store.Store's ListTLSFiles
func (s *ContextStoreWithDefault) ListTLSFiles(name string) (map[string]store.EndpointFiles, error) {
if name == DefaultContextName {
defaultContext, err := s.Resolver()
if err != nil {
return nil, err
}
tlsfiles := make(map[string]store.EndpointFiles)
for epName, epTLSData := range defaultContext.TLS.Endpoints {
var files store.EndpointFiles
for filename := range epTLSData.Files {
files = append(files, filename)
}
tlsfiles[epName] = files
}
return tlsfiles, nil
}
return s.Store.ListTLSFiles(name)
}
// GetTLSData implements store.Store's GetTLSData
func (s *ContextStoreWithDefault) GetTLSData(contextName, endpointName, fileName string) ([]byte, error) {
if contextName == DefaultContextName {
defaultContext, err := s.Resolver()
if err != nil {
return nil, err
}
if defaultContext.TLS.Endpoints[endpointName].Files[fileName] == nil {
return nil, errdefs.NotFound(errors.Errorf("TLS data for %s/%s/%s does not exist", DefaultContextName, endpointName, fileName))
}
return defaultContext.TLS.Endpoints[endpointName].Files[fileName], nil
}
return s.Store.GetTLSData(contextName, endpointName, fileName)
}
// GetStorageInfo implements store.Store's GetStorageInfo
func (s *ContextStoreWithDefault) GetStorageInfo(contextName string) store.StorageInfo {
if contextName == DefaultContextName {
return store.StorageInfo{MetadataPath: "<IN MEMORY>", TLSPath: "<IN MEMORY>"}
}
return s.Store.GetStorageInfo(contextName)
}
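A small sketch (not part of the vendored file) of the default-context guards above; the embedded Store and the Resolver are left nil only because these two calls return before either is touched, whereas a real caller wires in a concrete store.Store and a DefaultContextResolver:
package main

import (
	"fmt"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/context/store"
)

func main() {
	s := &command.ContextStoreWithDefault{} // Store and Resolver intentionally nil, see note above

	fmt.Println(s.Remove(command.DefaultContextName))
	// default context cannot be removed

	fmt.Println(s.CreateOrUpdate(store.Metadata{Name: command.DefaultContextName}))
	// default context cannot be created nor updated
}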


@ -0,0 +1,183 @@
package formatter
import (
"sort"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
)
const (
defaultBuildCacheTableFormat = "table {{.ID}}\t{{.Type}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}\t{{.Description}}"
cacheIDHeader = "CACHE ID"
cacheTypeHeader = "CACHE TYPE"
parentHeader = "PARENT"
lastUsedSinceHeader = "LAST USED"
usageCountHeader = "USAGE"
inUseHeader = "IN USE"
sharedHeader = "SHARED"
)
// NewBuildCacheFormat returns a Format for rendering using a Context
func NewBuildCacheFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return DefaultQuietFormat
}
return Format(defaultBuildCacheTableFormat)
case RawFormatKey:
if quiet {
return `build_cache_id: {{.ID}}`
}
format := `build_cache_id: {{.ID}}
parent_id: {{.Parent}}
build_cache_type: {{.CacheType}}
description: {{.Description}}
created_at: {{.CreatedAt}}
created_since: {{.CreatedSince}}
last_used_at: {{.LastUsedAt}}
last_used_since: {{.LastUsedSince}}
usage_count: {{.UsageCount}}
in_use: {{.InUse}}
shared: {{.Shared}}
`
return Format(format)
}
return Format(source)
}
func buildCacheSort(buildCache []*types.BuildCache) {
sort.Slice(buildCache, func(i, j int) bool {
lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt
switch {
case lui == nil && luj == nil:
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
case lui == nil:
return true
case luj == nil:
return false
case lui.Equal(*luj):
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
default:
return lui.Before(*luj)
}
})
}
// BuildCacheWrite renders the context for a list of build cache records
func BuildCacheWrite(ctx Context, buildCaches []*types.BuildCache) error {
render := func(format func(subContext SubContext) error) error {
buildCacheSort(buildCaches)
for _, bc := range buildCaches {
err := format(&buildCacheContext{trunc: ctx.Trunc, v: bc})
if err != nil {
return err
}
}
return nil
}
return ctx.Write(newBuildCacheContext(), render)
}
type buildCacheContext struct {
HeaderContext
trunc bool
v *types.BuildCache
}
func newBuildCacheContext() *buildCacheContext {
buildCacheCtx := buildCacheContext{}
buildCacheCtx.Header = SubHeaderContext{
"ID": cacheIDHeader,
"Parent": parentHeader,
"CacheType": cacheTypeHeader,
"Size": SizeHeader,
"CreatedSince": CreatedSinceHeader,
"LastUsedSince": lastUsedSinceHeader,
"UsageCount": usageCountHeader,
"InUse": inUseHeader,
"Shared": sharedHeader,
"Description": DescriptionHeader,
}
return &buildCacheCtx
}
func (c *buildCacheContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
func (c *buildCacheContext) ID() string {
id := c.v.ID
if c.trunc {
id = stringid.TruncateID(c.v.ID)
}
if c.v.InUse {
return id + "*"
}
return id
}
func (c *buildCacheContext) Parent() string {
var parent string
if len(c.v.Parents) > 0 {
parent = strings.Join(c.v.Parents, ", ")
} else {
parent = c.v.Parent //nolint:staticcheck // Ignore SA1019: Field was deprecated in API v1.42, but kept for backward compatibility
}
if c.trunc {
return stringid.TruncateID(parent)
}
return parent
}
func (c *buildCacheContext) CacheType() string {
return c.v.Type
}
func (c *buildCacheContext) Description() string {
return c.v.Description
}
func (c *buildCacheContext) Size() string {
return units.HumanSizeWithPrecision(float64(c.v.Size), 3)
}
func (c *buildCacheContext) CreatedAt() string {
return c.v.CreatedAt.String()
}
func (c *buildCacheContext) CreatedSince() string {
return units.HumanDuration(time.Now().UTC().Sub(c.v.CreatedAt)) + " ago"
}
func (c *buildCacheContext) LastUsedAt() string {
if c.v.LastUsedAt == nil {
return ""
}
return c.v.LastUsedAt.String()
}
func (c *buildCacheContext) LastUsedSince() string {
if c.v.LastUsedAt == nil {
return ""
}
return units.HumanDuration(time.Now().UTC().Sub(*c.v.LastUsedAt)) + " ago"
}
func (c *buildCacheContext) UsageCount() string {
return strconv.Itoa(c.v.UsageCount)
}
func (c *buildCacheContext) InUse() string {
return strconv.FormatBool(c.v.InUse)
}
func (c *buildCacheContext) Shared() string {
return strconv.FormatBool(c.v.Shared)
}
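A self-contained rendering sketch (illustrative, not part of the vendored file); the build-cache record is made up, in practice it comes from the daemon's disk-usage API:
package main

import (
	"os"
	"time"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types"
)

func main() {
	now := time.Now()
	records := []*types.BuildCache{
		{
			ID:          "abcdef123456",
			Type:        "regular",
			Description: "exported layer",
			Size:        1024 * 1024,
			CreatedAt:   now.Add(-2 * time.Hour),
			LastUsedAt:  &now,
			UsageCount:  2,
			InUse:       true,
		},
	}

	ctx := formatter.Context{
		Output: os.Stdout,
		Format: formatter.NewBuildCacheFormat(formatter.TableFormatKey, false),
		Trunc:  true,
	}
	if err := formatter.BuildCacheWrite(ctx, records); err != nil {
		panic(err)
	}
}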


@ -0,0 +1,395 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package formatter
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
)
const (
defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}"
namesHeader = "NAMES"
commandHeader = "COMMAND"
runningForHeader = "CREATED"
mountsHeader = "MOUNTS"
localVolumes = "LOCAL VOLUMES"
networksHeader = "NETWORKS"
)
// NewContainerFormat returns a Format for rendering using a Context
func NewContainerFormat(source string, quiet bool, size bool) Format {
switch source {
case TableFormatKey, "": // table formatting is the default if none is set.
if quiet {
return DefaultQuietFormat
}
format := defaultContainerTableFormat
if size {
format += `\t{{.Size}}`
}
return Format(format)
case RawFormatKey:
if quiet {
return `container_id: {{.ID}}`
}
format := `container_id: {{.ID}}
image: {{.Image}}
command: {{.Command}}
created_at: {{.CreatedAt}}
state: {{- pad .State 1 0}}
status: {{- pad .Status 1 0}}
names: {{.Names}}
labels: {{- pad .Labels 1 0}}
ports: {{- pad .Ports 1 0}}
`
if size {
format += `size: {{.Size}}\n`
}
return Format(format)
default: // custom format
if quiet {
return DefaultQuietFormat
}
return Format(source)
}
}
// ContainerWrite renders the context for a list of containers
func ContainerWrite(ctx Context, containers []types.Container) error {
render := func(format func(subContext SubContext) error) error {
for _, container := range containers {
err := format(&ContainerContext{trunc: ctx.Trunc, c: container})
if err != nil {
return err
}
}
return nil
}
return ctx.Write(NewContainerContext(), render)
}
// ContainerContext is a struct used for rendering a list of containers in a Go template.
type ContainerContext struct {
HeaderContext
trunc bool
c types.Container
// FieldsUsed is used in the pre-processing step to detect which fields are
// used in the template. It's currently only used to detect use of the .Size
// field which (if used) automatically sets the '--size' option when making
// the API call.
FieldsUsed map[string]any
}
// NewContainerContext creates a new context for rendering containers
func NewContainerContext() *ContainerContext {
containerCtx := ContainerContext{}
containerCtx.Header = SubHeaderContext{
"ID": ContainerIDHeader,
"Names": namesHeader,
"Image": ImageHeader,
"Command": commandHeader,
"CreatedAt": CreatedAtHeader,
"RunningFor": runningForHeader,
"Ports": PortsHeader,
"State": StateHeader,
"Status": StatusHeader,
"Size": SizeHeader,
"Labels": LabelsHeader,
"Mounts": mountsHeader,
"LocalVolumes": localVolumes,
"Networks": networksHeader,
}
return &containerCtx
}
// MarshalJSON makes ContainerContext implement json.Marshaler
func (c *ContainerContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
// ID returns the container's ID as a string. Depending on the `--no-trunc`
// option being set, the full or truncated ID is returned.
func (c *ContainerContext) ID() string {
if c.trunc {
return stringid.TruncateID(c.c.ID)
}
return c.c.ID
}
// Names returns a comma-separated string of the container's names, with their
// slash (/) prefix stripped. Additional names for the container (related to the
// legacy `--link` feature) are omitted.
func (c *ContainerContext) Names() string {
names := StripNamePrefix(c.c.Names)
if c.trunc {
for _, name := range names {
if len(strings.Split(name, "/")) == 1 {
names = []string{name}
break
}
}
}
return strings.Join(names, ",")
}
// StripNamePrefix removes the prefix from each string, typically container names as returned by the `ContainersList` API
func StripNamePrefix(ss []string) []string {
sss := make([]string, len(ss))
for i, s := range ss {
sss[i] = s[1:]
}
return sss
}
// Image returns the container's image reference. If the trunc option is set,
// the registry digest is stripped from the reference.
func (c *ContainerContext) Image() string {
if c.c.Image == "" {
return "<no image>"
}
if c.trunc {
if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) {
return trunc
}
// truncate digest if no-trunc option was not selected
ref, err := reference.ParseNormalizedNamed(c.c.Image)
if err == nil {
if nt, ok := ref.(reference.NamedTagged); ok {
// case for when a tag is provided
if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
return reference.FamiliarString(namedTagged)
}
} else {
// case for when a tag is not provided
named := reference.TrimNamed(ref)
return reference.FamiliarString(named)
}
}
}
return c.c.Image
}
// Command returns the container's command. If the trunc option is set, the
// returned command is truncated (ellipsized).
func (c *ContainerContext) Command() string {
command := c.c.Command
if c.trunc {
command = Ellipsis(command, 20)
}
return strconv.Quote(command)
}
// CreatedAt returns the "Created" date/time of the container as a unix timestamp.
func (c *ContainerContext) CreatedAt() string {
return time.Unix(c.c.Created, 0).String()
}
// RunningFor returns a human-readable representation of the duration for which
// the container has been running.
//
// Note that this duration is calculated on the client, and as such is influenced
// by clock skew between the client and the daemon.
func (c *ContainerContext) RunningFor() string {
createdAt := time.Unix(c.c.Created, 0)
return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago"
}
// Ports returns a comma-separated string representing the open ports of the
// container, e.g. "0.0.0.0:80->9090/tcp, 9988/tcp". It is used by the
// 'docker ps' command. Both published and exposed ports are included.
func (c *ContainerContext) Ports() string {
return DisplayablePorts(c.c.Ports)
}
// State returns the container's current state (e.g. "running" or "paused")
func (c *ContainerContext) State() string {
return c.c.State
}
// Status returns the container's status in a human readable form (for example,
// "Up 24 hours" or "Exited (0) 8 days ago")
func (c *ContainerContext) Status() string {
return c.c.Status
}
// Size returns the container's size and virtual size (e.g. "2B (virtual 21.5MB)")
func (c *ContainerContext) Size() string {
if c.FieldsUsed == nil {
c.FieldsUsed = map[string]any{}
}
c.FieldsUsed["Size"] = struct{}{}
srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3)
sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3)
sf := srw
if c.c.SizeRootFs > 0 {
sf = fmt.Sprintf("%s (virtual %s)", srw, sv)
}
return sf
}
// Labels returns a comma-separated string of labels present on the container.
func (c *ContainerContext) Labels() string {
if c.c.Labels == nil {
return ""
}
joinLabels := make([]string, 0, len(c.c.Labels))
for k, v := range c.c.Labels {
joinLabels = append(joinLabels, k+"="+v)
}
return strings.Join(joinLabels, ",")
}
// Label returns the value of the label with the given name or an empty string
// if the given label does not exist.
func (c *ContainerContext) Label(name string) string {
if c.c.Labels == nil {
return ""
}
return c.c.Labels[name]
}
// Mounts returns a comma-separated string of mount names present on the container.
// If the trunc option is set, names can be truncated (ellipsized).
func (c *ContainerContext) Mounts() string {
var name string
mounts := make([]string, 0, len(c.c.Mounts))
for _, m := range c.c.Mounts {
if m.Name == "" {
name = m.Source
} else {
name = m.Name
}
if c.trunc {
name = Ellipsis(name, 15)
}
mounts = append(mounts, name)
}
return strings.Join(mounts, ",")
}
// LocalVolumes returns the number of volumes using the "local" volume driver.
func (c *ContainerContext) LocalVolumes() string {
count := 0
for _, m := range c.c.Mounts {
if m.Driver == "local" {
count++
}
}
return strconv.Itoa(count)
}
// Networks returns a comma-separated string of networks that the container is
// attached to.
func (c *ContainerContext) Networks() string {
if c.c.NetworkSettings == nil {
return ""
}
networks := make([]string, 0, len(c.c.NetworkSettings.Networks))
for k := range c.c.NetworkSettings.Networks {
networks = append(networks, k)
}
return strings.Join(networks, ",")
}
// DisplayablePorts returns a formatted string representing the open ports of
// a container, e.g. "0.0.0.0:80->9090/tcp, 9988/tcp". It is used by the
// 'docker ps' command.
func DisplayablePorts(ports []types.Port) string {
type portGroup struct {
first uint16
last uint16
}
groupMap := make(map[string]*portGroup)
var result []string //nolint:prealloc
var hostMappings []string
var groupMapKeys []string
sort.Slice(ports, func(i, j int) bool {
return comparePorts(ports[i], ports[j])
})
for _, port := range ports {
current := port.PrivatePort
portKey := port.Type
if port.IP != "" {
if port.PublicPort != current {
hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
continue
}
portKey = port.IP + "/" + port.Type
}
group := groupMap[portKey]
if group == nil {
groupMap[portKey] = &portGroup{first: current, last: current}
// record order that groupMap keys are created
groupMapKeys = append(groupMapKeys, portKey)
continue
}
if current == (group.last + 1) {
group.last = current
continue
}
result = append(result, formGroup(portKey, group.first, group.last))
groupMap[portKey] = &portGroup{first: current, last: current}
}
for _, portKey := range groupMapKeys {
g := groupMap[portKey]
result = append(result, formGroup(portKey, g.first, g.last))
}
result = append(result, hostMappings...)
return strings.Join(result, ", ")
}
func formGroup(key string, start, last uint16) string {
parts := strings.Split(key, "/")
groupType := parts[0]
var ip string
if len(parts) > 1 {
ip = parts[0]
groupType = parts[1]
}
group := strconv.Itoa(int(start))
if start != last {
group = fmt.Sprintf("%s-%d", group, last)
}
if ip != "" {
group = fmt.Sprintf("%s:%s->%s", ip, group, group)
}
return group + "/" + groupType
}
func comparePorts(i, j types.Port) bool {
if i.PrivatePort != j.PrivatePort {
return i.PrivatePort < j.PrivatePort
}
if i.IP != j.IP {
return i.IP < j.IP
}
if i.PublicPort != j.PublicPort {
return i.PublicPort < j.PublicPort
}
return i.Type < j.Type
}
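A short sketch (not part of the vendored file) of DisplayablePorts: consecutive exposed ports collapse into a range, while published ports keep their host mapping. The port values are made up:
package main

import (
	"fmt"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types"
)

func main() {
	ports := []types.Port{
		{IP: "0.0.0.0", PrivatePort: 9090, PublicPort: 80, Type: "tcp"},
		{PrivatePort: 9988, Type: "tcp"},
		{PrivatePort: 9989, Type: "tcp"},
	}
	fmt.Println(formatter.DisplayablePorts(ports))
	// 9988-9989/tcp, 0.0.0.0:80->9090/tcp
}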


@ -0,0 +1,102 @@
package formatter
import "encoding/json"
const (
// ClientContextTableFormat is the default client context format.
ClientContextTableFormat = "table {{.Name}}{{if .Current}} *{{end}}\t{{.Description}}\t{{.DockerEndpoint}}\t{{.Error}}"
dockerEndpointHeader = "DOCKER ENDPOINT"
quietContextFormat = "{{.Name}}"
maxErrLength = 45
)
// NewClientContextFormat returns a Format for rendering using a Context
func NewClientContextFormat(source string, quiet bool) Format {
if quiet {
return quietContextFormat
}
if source == TableFormatKey {
return ClientContextTableFormat
}
return Format(source)
}
// ClientContext is a context for display
type ClientContext struct {
Name string
Description string
DockerEndpoint string
Current bool
Error string
// ContextType is a temporary field for compatibility with
// Visual Studio, which depends on this from the "cloud integration"
// wrapper.
//
// Deprecated: this type is only for backward-compatibility. Do not use.
ContextType string `json:"ContextType,omitempty"`
}
// ClientContextWrite writes formatted contexts using the Context
func ClientContextWrite(ctx Context, contexts []*ClientContext) error {
render := func(format func(subContext SubContext) error) error {
for _, context := range contexts {
if err := format(&clientContextContext{c: context}); err != nil {
return err
}
}
return nil
}
return ctx.Write(newClientContextContext(), render)
}
type clientContextContext struct {
HeaderContext
c *ClientContext
}
func newClientContextContext() *clientContextContext {
ctx := clientContextContext{}
ctx.Header = SubHeaderContext{
"Name": NameHeader,
"Description": DescriptionHeader,
"DockerEndpoint": dockerEndpointHeader,
"Error": ErrorHeader,
}
return &ctx
}
func (c *clientContextContext) MarshalJSON() ([]byte, error) {
if c.c.ContextType != "" {
// We only have ContextType set for plain "json" or "{{json .}}" formatting,
// so we should be able to just use the default json.Marshal with no
// special handling.
return json.Marshal(c.c)
}
// FIXME(thaJeztah): why do we need a special marshal function here?
return MarshalJSON(c)
}
func (c *clientContextContext) Current() bool {
return c.c.Current
}
func (c *clientContextContext) Name() string {
return c.c.Name
}
func (c *clientContextContext) Description() string {
return c.c.Description
}
func (c *clientContextContext) DockerEndpoint() string {
return c.c.DockerEndpoint
}
// Error returns the truncated error (if any) that occurred when loading the context.
func (c *clientContextContext) Error() string {
// TODO(thaJeztah) add "--no-trunc" option to context ls and set default to 30 cols to match "docker service ps"
return Ellipsis(c.c.Error, maxErrLength)
}
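A minimal listing sketch (not part of the vendored file); the context entries and endpoints are made up:
package main

import (
	"os"

	"github.com/docker/cli/cli/command/formatter"
)

func main() {
	contexts := []*formatter.ClientContext{
		{Name: "default", Description: "Current DOCKER_HOST based configuration", DockerEndpoint: "unix:///var/run/docker.sock", Current: true},
		{Name: "remote", DockerEndpoint: "ssh://user@example.com"},
	}
	ctx := formatter.Context{
		Output: os.Stdout,
		Format: formatter.NewClientContextFormat(formatter.TableFormatKey, false),
	}
	if err := formatter.ClientContextWrite(ctx, contexts); err != nil {
		panic(err)
	}
}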


@ -0,0 +1,51 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package formatter
import "strings"
// Common header constants
const (
CreatedSinceHeader = "CREATED"
CreatedAtHeader = "CREATED AT"
SizeHeader = "SIZE"
LabelsHeader = "LABELS"
NameHeader = "NAME"
DescriptionHeader = "DESCRIPTION"
DriverHeader = "DRIVER"
ScopeHeader = "SCOPE"
StateHeader = "STATE"
StatusHeader = "STATUS"
PortsHeader = "PORTS"
ImageHeader = "IMAGE"
ErrorHeader = "ERROR"
ContainerIDHeader = "CONTAINER ID"
)
// SubContext defines what Context implementation should provide
type SubContext interface {
FullHeader() any
}
// SubHeaderContext is a map used to render the formatter header (table format)
type SubHeaderContext map[string]string
// Label returns the header label for the specified string
func (c SubHeaderContext) Label(name string) string {
n := strings.Split(name, ".")
r := strings.NewReplacer("-", " ", "_", " ")
h := r.Replace(n[len(n)-1])
return h
}
// HeaderContext provides the subContext interface for managing headers
type HeaderContext struct {
Header any
}
// FullHeader returns the header as an interface
func (c *HeaderContext) FullHeader() any {
return c.Header
}
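A quick sketch (not part of the vendored file) of how Label derives a table header from the last dot-separated segment of a field name; the field names are made up:
package main

import (
	"fmt"

	"github.com/docker/cli/cli/command/formatter"
)

func main() {
	h := formatter.SubHeaderContext{}
	fmt.Println(h.Label("Config.Image")) // Image
	fmt.Println(h.Label("running_for"))  // running for
	fmt.Println(h.Label("host-ip"))      // host ip
}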


@ -0,0 +1,477 @@
package formatter
import (
"bytes"
"fmt"
"strconv"
"strings"
"text/template"
"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/volume"
units "github.com/docker/go-units"
)
const (
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}}\t{{.Status}}\t{{.Names}}"
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
defaultDiskUsageBuildCacheTableFormat = "table {{.ID}}\t{{.CacheType}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}"
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
typeHeader = "TYPE"
totalHeader = "TOTAL"
activeHeader = "ACTIVE"
reclaimableHeader = "RECLAIMABLE"
containersHeader = "CONTAINERS"
sharedSizeHeader = "SHARED SIZE"
uniqueSizeHeader = "UNIQUE SIZE"
)
// DiskUsageContext contains disk usage specific information required by the formatter, encapsulating a Context struct.
type DiskUsageContext struct {
Context
Verbose bool
LayersSize int64
Images []*image.Summary
Containers []*types.Container
Volumes []*volume.Volume
BuildCache []*types.BuildCache
BuilderSize int64
}
func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) {
ctx.buffer = bytes.NewBufferString("")
ctx.header = ""
ctx.Format = Format(format)
ctx.preFormat()
return ctx.parseFormat()
}
// NewDiskUsageFormat returns a format for rendering a DiskUsageContext
func NewDiskUsageFormat(source string, verbose bool) Format {
switch {
case verbose && source == RawFormatKey:
format := `{{range .Images}}type: Image
` + NewImageFormat(source, false, true) + `
{{end -}}
{{range .Containers}}type: Container
` + NewContainerFormat(source, false, true) + `
{{end -}}
{{range .Volumes}}type: Volume
` + NewVolumeFormat(source, false) + `
{{end -}}
{{range .BuildCache}}type: Build Cache
` + NewBuildCacheFormat(source, false) + `
{{end -}}`
return format
case !verbose && source == TableFormatKey:
return Format(defaultDiskUsageTableFormat)
case !verbose && source == RawFormatKey:
format := `type: {{.Type}}
total: {{.TotalCount}}
active: {{.Active}}
size: {{.Size}}
reclaimable: {{.Reclaimable}}
`
return Format(format)
default:
return Format(source)
}
}
func (ctx *DiskUsageContext) Write() (err error) {
if ctx.Verbose {
return ctx.verboseWrite()
}
ctx.buffer = bytes.NewBufferString("")
ctx.preFormat()
tmpl, err := ctx.parseFormat()
if err != nil {
return err
}
err = ctx.contextFormat(tmpl, &diskUsageImagesContext{
totalSize: ctx.LayersSize,
images: ctx.Images,
})
if err != nil {
return err
}
err = ctx.contextFormat(tmpl, &diskUsageContainersContext{
containers: ctx.Containers,
})
if err != nil {
return err
}
err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{
volumes: ctx.Volumes,
})
if err != nil {
return err
}
err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{
builderSize: ctx.BuilderSize,
buildCache: ctx.BuildCache,
})
if err != nil {
return err
}
diskUsageContainersCtx := diskUsageContainersContext{containers: []*types.Container{}}
diskUsageContainersCtx.Header = SubHeaderContext{
"Type": typeHeader,
"TotalCount": totalHeader,
"Active": activeHeader,
"Size": SizeHeader,
"Reclaimable": reclaimableHeader,
}
ctx.postFormat(tmpl, &diskUsageContainersCtx)
return err
}
type diskUsageContext struct {
Images []*imageContext
Containers []*ContainerContext
Volumes []*volumeContext
BuildCache []*buildCacheContext
}
func (ctx *DiskUsageContext) verboseWrite() error {
duc := &diskUsageContext{
Images: make([]*imageContext, 0, len(ctx.Images)),
Containers: make([]*ContainerContext, 0, len(ctx.Containers)),
Volumes: make([]*volumeContext, 0, len(ctx.Volumes)),
BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)),
}
trunc := ctx.Format.IsTable()
// First images
for _, i := range ctx.Images {
repo := "<none>"
tag := "<none>"
if len(i.RepoTags) > 0 && !isDangling(*i) {
// Only show the first tag
ref, err := reference.ParseNormalizedNamed(i.RepoTags[0])
if err != nil {
continue
}
if nt, ok := ref.(reference.NamedTagged); ok {
repo = reference.FamiliarName(ref)
tag = nt.Tag()
}
}
duc.Images = append(duc.Images, &imageContext{
repo: repo,
tag: tag,
trunc: trunc,
i: *i,
})
}
// Now containers
for _, c := range ctx.Containers {
// Don't display the virtual size
c.SizeRootFs = 0
duc.Containers = append(duc.Containers, &ContainerContext{trunc: trunc, c: *c})
}
// And volumes
for _, v := range ctx.Volumes {
duc.Volumes = append(duc.Volumes, &volumeContext{v: *v})
}
// And build cache
buildCacheSort(ctx.BuildCache)
for _, v := range ctx.BuildCache {
duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc})
}
if ctx.Format == TableFormatKey {
return ctx.verboseWriteTable(duc)
}
ctx.preFormat()
tmpl, err := ctx.parseFormat()
if err != nil {
return err
}
return tmpl.Execute(ctx.Output, duc)
}
func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("Images space usage:\n\n"))
for _, img := range duc.Images {
if err := ctx.contextFormat(tmpl, img); err != nil {
return err
}
}
ctx.postFormat(tmpl, newImageContext())
tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
for _, c := range duc.Containers {
if err := ctx.contextFormat(tmpl, c); err != nil {
return err
}
}
ctx.postFormat(tmpl, NewContainerContext())
tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
for _, v := range duc.Volumes {
if err := ctx.contextFormat(tmpl, v); err != nil {
return err
}
}
ctx.postFormat(tmpl, newVolumeContext())
tmpl, err = ctx.startSubsection(defaultDiskUsageBuildCacheTableFormat)
if err != nil {
return err
}
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
for _, v := range duc.BuildCache {
if err := ctx.contextFormat(tmpl, v); err != nil {
return err
}
}
ctx.postFormat(tmpl, newBuildCacheContext())
return nil
}
type diskUsageImagesContext struct {
HeaderContext
totalSize int64
images []*image.Summary
}
func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
func (c *diskUsageImagesContext) Type() string {
return "Images"
}
func (c *diskUsageImagesContext) TotalCount() string {
return strconv.Itoa(len(c.images))
}
func (c *diskUsageImagesContext) Active() string {
used := 0
for _, i := range c.images {
if i.Containers > 0 {
used++
}
}
return strconv.Itoa(used)
}
func (c *diskUsageImagesContext) Size() string {
return units.HumanSize(float64(c.totalSize))
}
func (c *diskUsageImagesContext) Reclaimable() string {
var used int64
for _, i := range c.images {
if i.Containers != 0 {
if i.Size == -1 || i.SharedSize == -1 {
continue
}
used += i.Size - i.SharedSize
}
}
reclaimable := c.totalSize - used
if c.totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize)
}
return units.HumanSize(float64(reclaimable))
}
type diskUsageContainersContext struct {
HeaderContext
containers []*types.Container
}
func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
func (c *diskUsageContainersContext) Type() string {
return "Containers"
}
func (c *diskUsageContainersContext) TotalCount() string {
return strconv.Itoa(len(c.containers))
}
func (c *diskUsageContainersContext) isActive(container types.Container) bool {
return strings.Contains(container.State, "running") ||
strings.Contains(container.State, "paused") ||
strings.Contains(container.State, "restarting")
}
func (c *diskUsageContainersContext) Active() string {
used := 0
for _, container := range c.containers {
if c.isActive(*container) {
used++
}
}
return strconv.Itoa(used)
}
func (c *diskUsageContainersContext) Size() string {
var size int64
for _, container := range c.containers {
size += container.SizeRw
}
return units.HumanSize(float64(size))
}
func (c *diskUsageContainersContext) Reclaimable() string {
var reclaimable int64
var totalSize int64
for _, container := range c.containers {
if !c.isActive(*container) {
reclaimable += container.SizeRw
}
totalSize += container.SizeRw
}
if totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
}
return units.HumanSize(float64(reclaimable))
}
type diskUsageVolumesContext struct {
HeaderContext
volumes []*volume.Volume
}
func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
func (c *diskUsageVolumesContext) Type() string {
return "Local Volumes"
}
func (c *diskUsageVolumesContext) TotalCount() string {
return strconv.Itoa(len(c.volumes))
}
func (c *diskUsageVolumesContext) Active() string {
used := 0
for _, v := range c.volumes {
if v.UsageData.RefCount > 0 {
used++
}
}
return strconv.Itoa(used)
}
func (c *diskUsageVolumesContext) Size() string {
var size int64
for _, v := range c.volumes {
if v.UsageData.Size != -1 {
size += v.UsageData.Size
}
}
return units.HumanSize(float64(size))
}
func (c *diskUsageVolumesContext) Reclaimable() string {
var reclaimable int64
var totalSize int64
for _, v := range c.volumes {
if v.UsageData.Size != -1 {
if v.UsageData.RefCount == 0 {
reclaimable += v.UsageData.Size
}
totalSize += v.UsageData.Size
}
}
if totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
}
return units.HumanSize(float64(reclaimable))
}
type diskUsageBuilderContext struct {
HeaderContext
builderSize int64
buildCache []*types.BuildCache
}
func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
func (c *diskUsageBuilderContext) Type() string {
return "Build Cache"
}
func (c *diskUsageBuilderContext) TotalCount() string {
return strconv.Itoa(len(c.buildCache))
}
func (c *diskUsageBuilderContext) Active() string {
numActive := 0
for _, bc := range c.buildCache {
if bc.InUse {
numActive++
}
}
return strconv.Itoa(numActive)
}
func (c *diskUsageBuilderContext) Size() string {
return units.HumanSize(float64(c.builderSize))
}
func (c *diskUsageBuilderContext) Reclaimable() string {
var inUseBytes int64
for _, bc := range c.buildCache {
if bc.InUse && !bc.Shared {
inUseBytes += bc.Size
}
}
return units.HumanSize(float64(c.builderSize - inUseBytes))
}
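A summary-table sketch (not part of the vendored file); the slices are left empty here, in practice they are filled from the daemon's disk-usage response before calling Write:
package main

import (
	"os"

	"github.com/docker/cli/cli/command/formatter"
)

func main() {
	duCtx := formatter.DiskUsageContext{
		Context: formatter.Context{
			Output: os.Stdout,
			Format: formatter.NewDiskUsageFormat(formatter.TableFormatKey, false),
		},
		// LayersSize, Images, Containers, Volumes, BuildCache and BuilderSize
		// would normally be populated from the daemon before calling Write.
	}
	if err := duCtx.Write(); err != nil {
		panic(err)
	}
}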


@ -0,0 +1,61 @@
package formatter
import (
"unicode/utf8"
"golang.org/x/text/width"
)
// charWidth returns the number of horizontal positions a character occupies,
// and is used to account for wide characters when displaying strings.
//
// In a broad sense, wide characters include East Asian Wide and East Asian
// Full-width characters; see http://unicode.org/reports/tr11/.
func charWidth(r rune) int {
switch width.LookupRune(r).Kind() {
case width.EastAsianWide, width.EastAsianFullwidth:
return 2
default:
return 1
}
}
// Ellipsis truncates a string to fit within maxDisplayWidth, and appends ellipsis (…).
// For maxDisplayWidth of 1 and lower, no ellipsis is appended.
// For maxDisplayWidth of 1, the first character of the string is returned even if its width > 1.
func Ellipsis(s string, maxDisplayWidth int) string {
if maxDisplayWidth <= 0 {
return ""
}
rs := []rune(s)
if maxDisplayWidth == 1 {
return string(rs[0])
}
byteLen := len(s)
if byteLen == utf8.RuneCountInString(s) {
if byteLen <= maxDisplayWidth {
return s
}
return string(rs[:maxDisplayWidth-1]) + "…"
}
var (
display = make([]int, 0, len(rs))
displayWidth int
)
for _, r := range rs {
cw := charWidth(r)
displayWidth += cw
display = append(display, displayWidth)
}
if displayWidth <= maxDisplayWidth {
return s
}
for i := range display {
if display[i] <= maxDisplayWidth-1 && display[i+1] > maxDisplayWidth-1 {
return string(rs[:i+1]) + "…"
}
}
return s
}
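A few illustrative calls (not part of the vendored file) showing both the ASCII fast path and the wide-character path:
package main

import (
	"fmt"

	"github.com/docker/cli/cli/command/formatter"
)

func main() {
	fmt.Println(formatter.Ellipsis("hello", 10))      // hello (fits, returned unchanged)
	fmt.Println(formatter.Ellipsis("hello world", 7)) // hello …
	// Wide (East Asian) runes count as two display columns.
	fmt.Println(formatter.Ellipsis("日本語", 4)) // 日…
}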


@ -0,0 +1,131 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package formatter
import (
"bytes"
"io"
"strings"
"text/template"
"github.com/docker/cli/cli/command/formatter/tabwriter"
"github.com/docker/cli/templates"
"github.com/pkg/errors"
)
// Format keys used to specify certain kinds of output formats
const (
TableFormatKey = "table"
RawFormatKey = "raw"
PrettyFormatKey = "pretty"
JSONFormatKey = "json"
DefaultQuietFormat = "{{.ID}}"
JSONFormat = "{{json .}}"
)
// Format is the format string rendered using the Context
type Format string
// IsTable returns true if the format is a table-type format
func (f Format) IsTable() bool {
return strings.HasPrefix(string(f), TableFormatKey)
}
// IsJSON returns true if the format is the json format
func (f Format) IsJSON() bool {
return string(f) == JSONFormatKey
}
// Contains returns true if the format contains the substring
func (f Format) Contains(sub string) bool {
return strings.Contains(string(f), sub)
}
// Context contains information required by the formatter to print the output as desired.
type Context struct {
// Output is the output stream to which the formatted string is written.
Output io.Writer
// Format is used to choose raw, table or custom format for the output.
Format Format
// Trunc when set to true will truncate the output of certain fields such as Container ID.
Trunc bool
// internal element
finalFormat string
header any
buffer *bytes.Buffer
}
func (c *Context) preFormat() {
c.finalFormat = string(c.Format)
// TODO: handle this in the Format type
switch {
case c.Format.IsTable():
c.finalFormat = c.finalFormat[len(TableFormatKey):]
case c.Format.IsJSON():
c.finalFormat = JSONFormat
}
c.finalFormat = strings.Trim(c.finalFormat, " ")
r := strings.NewReplacer(`\t`, "\t", `\n`, "\n")
c.finalFormat = r.Replace(c.finalFormat)
}
func (c *Context) parseFormat() (*template.Template, error) {
tmpl, err := templates.Parse(c.finalFormat)
if err != nil {
return tmpl, errors.Wrap(err, "template parsing error")
}
return tmpl, err
}
func (c *Context) postFormat(tmpl *template.Template, subContext SubContext) {
if c.Format.IsTable() {
t := tabwriter.NewWriter(c.Output, 10, 1, 3, ' ', 0)
buffer := bytes.NewBufferString("")
tmpl.Funcs(templates.HeaderFunctions).Execute(buffer, subContext.FullHeader())
buffer.WriteTo(t)
t.Write([]byte("\n"))
c.buffer.WriteTo(t)
t.Flush()
} else {
c.buffer.WriteTo(c.Output)
}
}
func (c *Context) contextFormat(tmpl *template.Template, subContext SubContext) error {
if err := tmpl.Execute(c.buffer, subContext); err != nil {
return errors.Wrap(err, "template parsing error")
}
if c.Format.IsTable() && c.header != nil {
c.header = subContext.FullHeader()
}
c.buffer.WriteString("\n")
return nil
}
// SubFormat is a function type accepted by Write()
type SubFormat func(func(SubContext) error) error
// Write the template to the buffer using this Context
func (c *Context) Write(sub SubContext, f SubFormat) error {
c.buffer = bytes.NewBufferString("")
c.preFormat()
tmpl, err := c.parseFormat()
if err != nil {
return err
}
subFormat := func(subContext SubContext) error {
return c.contextFormat(tmpl, subContext)
}
if err := f(subFormat); err != nil {
return err
}
c.postFormat(tmpl, sub)
return nil
}
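A self-contained sketch (not part of the vendored file) of driving Context.Write with a hypothetical sub-context; taskContext, its fields, and the sample rows are made up for illustration:
package main

import (
	"os"

	"github.com/docker/cli/cli/command/formatter"
)

// taskContext is a hypothetical sub-context with two renderable fields.
type taskContext struct {
	formatter.HeaderContext
	name  string
	state string
}

func (t *taskContext) Name() string  { return t.name }
func (t *taskContext) State() string { return t.state }

func main() {
	ctx := formatter.Context{
		Output: os.Stdout,
		Format: formatter.Format("table {{.Name}}\t{{.State}}"),
	}
	render := func(format func(formatter.SubContext) error) error {
		for _, t := range []*taskContext{
			{name: "web", state: "running"},
			{name: "db", state: "exited"},
		} {
			if err := format(t); err != nil {
				return err
			}
		}
		return nil
	}
	// The sub-context passed to Write carries the table headers.
	header := &taskContext{}
	header.Header = formatter.SubHeaderContext{"Name": "NAME", "State": "STATE"}
	if err := ctx.Write(header, render); err != nil {
		panic(err)
	}
}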


@ -0,0 +1,282 @@
package formatter
import (
"strconv"
"time"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/pkg/stringid"
units "github.com/docker/go-units"
)
const (
defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{if .CreatedSince }}{{.CreatedSince}}{{else}}N/A{{end}}\t{{.Size}}"
defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{if .CreatedSince }}{{.CreatedSince}}{{else}}N/A{{end}}\t{{.Size}}"
imageIDHeader = "IMAGE ID"
repositoryHeader = "REPOSITORY"
tagHeader = "TAG"
digestHeader = "DIGEST"
)
// ImageContext contains image specific information required by the formatter, encapsulating a Context struct.
type ImageContext struct {
Context
Digest bool
}
func isDangling(img image.Summary) bool {
if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
return true
}
return len(img.RepoTags) == 1 && img.RepoTags[0] == "<none>:<none>" && len(img.RepoDigests) == 1 && img.RepoDigests[0] == "<none>@<none>"
}
// NewImageFormat returns a format for rendering an ImageContext
func NewImageFormat(source string, quiet bool, digest bool) Format {
switch source {
case TableFormatKey:
switch {
case quiet:
return DefaultQuietFormat
case digest:
return defaultImageTableFormatWithDigest
default:
return defaultImageTableFormat
}
case RawFormatKey:
switch {
case quiet:
return `image_id: {{.ID}}`
case digest:
return `repository: {{ .Repository }}
tag: {{.Tag}}
digest: {{.Digest}}
image_id: {{.ID}}
created_at: {{.CreatedAt}}
virtual_size: {{.Size}}
`
default:
return `repository: {{ .Repository }}
tag: {{.Tag}}
image_id: {{.ID}}
created_at: {{.CreatedAt}}
virtual_size: {{.Size}}
`
}
}
format := Format(source)
if format.IsTable() && digest && !format.Contains("{{.Digest}}") {
format += "\t{{.Digest}}"
}
return format
}
// ImageWrite writes the formatter images using the ImageContext
func ImageWrite(ctx ImageContext, images []image.Summary) error {
render := func(format func(subContext SubContext) error) error {
return imageFormat(ctx, images, format)
}
return ctx.Write(newImageContext(), render)
}
// needDigest determines whether the image digest should be displayed when writing the image context
func needDigest(ctx ImageContext) bool {
return ctx.Digest || ctx.Format.Contains("{{.Digest}}")
}
func imageFormat(ctx ImageContext, images []image.Summary, format func(subContext SubContext) error) error {
for _, img := range images {
formatted := []*imageContext{}
if isDangling(img) {
formatted = append(formatted, &imageContext{
trunc: ctx.Trunc,
i: img,
repo: "<none>",
tag: "<none>",
digest: "<none>",
})
} else {
formatted = imageFormatTaggedAndDigest(ctx, img)
}
for _, imageCtx := range formatted {
if err := format(imageCtx); err != nil {
return err
}
}
}
return nil
}
func imageFormatTaggedAndDigest(ctx ImageContext, img image.Summary) []*imageContext {
repoTags := map[string][]string{}
repoDigests := map[string][]string{}
images := []*imageContext{}
for _, refString := range img.RepoTags {
ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
continue
}
if nt, ok := ref.(reference.NamedTagged); ok {
familiarRef := reference.FamiliarName(ref)
repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag())
}
}
for _, refString := range img.RepoDigests {
ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
continue
}
if c, ok := ref.(reference.Canonical); ok {
familiarRef := reference.FamiliarName(ref)
repoDigests[familiarRef] = append(repoDigests[familiarRef], c.Digest().String())
}
}
addImage := func(repo, tag, digest string) {
images = append(images, &imageContext{
trunc: ctx.Trunc,
i: img,
repo: repo,
tag: tag,
digest: digest,
})
}
for repo, tags := range repoTags {
digests := repoDigests[repo]
// Do not display digests as their own row
delete(repoDigests, repo)
if !needDigest(ctx) {
// Ignore digest references, just show tag once
digests = nil
}
for _, tag := range tags {
if len(digests) == 0 {
addImage(repo, tag, "<none>")
continue
}
// Display the digests for each tag
for _, dgst := range digests {
addImage(repo, tag, dgst)
}
}
}
// Show rows for remaining digest only references
for repo, digests := range repoDigests {
// If digests are displayed, show row per digest
if ctx.Digest {
for _, dgst := range digests {
addImage(repo, "<none>", dgst)
}
} else {
addImage(repo, "<none>", "")
}
}
return images
}
type imageContext struct {
HeaderContext
trunc bool
i image.Summary
repo string
tag string
digest string
}
func newImageContext() *imageContext {
imageCtx := imageContext{}
imageCtx.Header = SubHeaderContext{
"ID": imageIDHeader,
"Repository": repositoryHeader,
"Tag": tagHeader,
"Digest": digestHeader,
"CreatedSince": CreatedSinceHeader,
"CreatedAt": CreatedAtHeader,
"Size": SizeHeader,
"Containers": containersHeader,
"VirtualSize": SizeHeader, // Deprecated: VirtualSize is deprecated, and equivalent to Size.
"SharedSize": sharedSizeHeader,
"UniqueSize": uniqueSizeHeader,
}
return &imageCtx
}
func (c *imageContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
func (c *imageContext) ID() string {
if c.trunc {
return stringid.TruncateID(c.i.ID)
}
return c.i.ID
}
func (c *imageContext) Repository() string {
return c.repo
}
func (c *imageContext) Tag() string {
return c.tag
}
func (c *imageContext) Digest() string {
return c.digest
}
func (c *imageContext) CreatedSince() string {
createdAt := time.Unix(c.i.Created, 0)
if createdAt.IsZero() {
return ""
}
return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago"
}
func (c *imageContext) CreatedAt() string {
return time.Unix(c.i.Created, 0).String()
}
func (c *imageContext) Size() string {
return units.HumanSizeWithPrecision(float64(c.i.Size), 3)
}
func (c *imageContext) Containers() string {
if c.i.Containers == -1 {
return "N/A"
}
return strconv.FormatInt(c.i.Containers, 10)
}
// VirtualSize shows the virtual size of the image and all of its parent
// images. Starting with docker 1.10, images are self-contained, and
// the VirtualSize is identical to Size.
//
// Deprecated: VirtualSize is deprecated, and equivalent to [imageContext.Size].
func (c *imageContext) VirtualSize() string {
return units.HumanSize(float64(c.i.Size))
}
func (c *imageContext) SharedSize() string {
if c.i.SharedSize == -1 {
return "N/A"
}
return units.HumanSize(float64(c.i.SharedSize))
}
func (c *imageContext) UniqueSize() string {
if c.i.Size == -1 || c.i.SharedSize == -1 {
return "N/A"
}
return units.HumanSize(float64(c.i.Size - c.i.SharedSize))
}
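A rendering sketch (not part of the vendored file); the image summary is made up:
package main

import (
	"os"
	"time"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types/image"
)

func main() {
	images := []image.Summary{
		{
			ID:       "sha256:0123456789abcdef",
			RepoTags: []string{"alpine:latest"},
			Created:  time.Now().Add(-48 * time.Hour).Unix(),
			Size:     7300000,
		},
	}
	ctx := formatter.ImageContext{
		Context: formatter.Context{
			Output: os.Stdout,
			Format: formatter.NewImageFormat(formatter.TableFormatKey, false, false),
			Trunc:  true,
		},
	}
	if err := formatter.ImageWrite(ctx, images); err != nil {
		panic(err)
	}
}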


@ -0,0 +1,71 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package formatter
import (
"encoding/json"
"reflect"
"unicode"
"github.com/pkg/errors"
)
// MarshalJSON marshals x into JSON.
// It differs from the encoding/json Marshal function in that it serializes the
// values returned by x's exported zero-argument methods instead of its struct fields.
func MarshalJSON(x any) ([]byte, error) {
m, err := marshalMap(x)
if err != nil {
return nil, err
}
return json.Marshal(m)
}
// marshalMap marshals x to map[string]any
func marshalMap(x any) (map[string]any, error) {
val := reflect.ValueOf(x)
if val.Kind() != reflect.Ptr {
return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind())
}
if val.IsNil() {
return nil, errors.Errorf("expected a pointer to a struct, got nil pointer")
}
valElem := val.Elem()
if valElem.Kind() != reflect.Struct {
return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind())
}
typ := val.Type()
m := make(map[string]any)
for i := 0; i < val.NumMethod(); i++ {
k, v, err := marshalForMethod(typ.Method(i), val.Method(i))
if err != nil {
return nil, err
}
if k != "" {
m[k] = v
}
}
return m, nil
}
var unmarshallableNames = map[string]struct{}{"FullHeader": {}}
// marshalForMethod returns the map key and the map value for marshalling the method.
// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()")
func marshalForMethod(typ reflect.Method, val reflect.Value) (string, any, error) {
if val.Kind() != reflect.Func {
return "", nil, errors.Errorf("expected func, got %v", val.Kind())
}
name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut()
_, blackListed := unmarshallableNames[name]
// FIXME: In text/template, (numOut == 2) is marshallable,
// if the type of the second param is error.
marshallable := unicode.IsUpper(rune(name[0])) && !blackListed &&
numIn == 0 && numOut == 1
if !marshallable {
return "", nil, nil
}
result := val.Call(make([]reflect.Value, numIn))
intf := result[0].Interface()
return name, intf, nil
}
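A small sketch (not part of the vendored file) of the reflection-based marshalling; the row type and its methods are hypothetical:
package main

import (
	"fmt"

	"github.com/docker/cli/cli/command/formatter"
)

// row only exists for this illustration: exported zero-argument methods are
// serialized, while FullHeader (blacklisted) and unexported methods are skipped.
type row struct {
	formatter.HeaderContext
}

func (r *row) Name() string  { return "web" }
func (r *row) State() string { return "running" }

func main() {
	out, err := formatter.MarshalJSON(&row{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"Name":"web","State":"running"}
}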


@ -0,0 +1,602 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tabwriter implements a write filter (tabwriter.Writer) that
// translates tabbed columns in input into properly aligned text.
//
// The package is using the Elastic Tabstops algorithm described at
// http://nickgravgaard.com/elastictabstops/index.html.
//
// The text/tabwriter package is frozen and is not accepting new features.
// Based on https://github.com/golang/go/blob/master/src/text/tabwriter/tabwriter.go (last modified 690ac40 on 31 Jan).
//nolint:gocyclo,nakedret,stylecheck,unused // ignore linting errors, so that we can stick close to upstream
package tabwriter
import (
"io"
"github.com/mattn/go-runewidth"
)
// ----------------------------------------------------------------------------
// Filter implementation
// A cell represents a segment of text terminated by tabs or line breaks.
// The text itself is stored in a separate buffer; cell only describes the
// segment's size in bytes, its width in runes, and whether it's an htab
// ('\t') terminated cell.
type cell struct {
size int // cell size in bytes
width int // cell width in runes
htab bool // true if the cell is terminated by an htab ('\t')
}
// A Writer is a filter that inserts padding around tab-delimited
// columns in its input to align them in the output.
//
// The Writer treats incoming bytes as UTF-8-encoded text consisting
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
// and newline ('\n') or formfeed ('\f') characters; both newline and
// formfeed act as line breaks.
//
// Tab-terminated cells in contiguous lines constitute a column. The
// Writer inserts padding as needed to make all cells in a column have
// the same width, effectively aligning the columns. It assumes that
// all characters have the same width, except for tabs for which a
// tabwidth must be specified. Column cells must be tab-terminated, not
// tab-separated: non-tab terminated trailing text at the end of a line
// forms a cell but that cell is not part of an aligned column.
// For instance, in this example (where | stands for a horizontal tab):
//
// aaaa|bbb|d
// aa |b |dd
// a |
// aa |cccc|eee
//
// the b and c are in distinct columns (the b column is not contiguous
// all the way). The d and e are not in a column at all (there's no
// terminating tab, nor would the column be contiguous).
//
// The Writer assumes that all Unicode code points have the same width;
// this may not be true in some fonts or if the string contains combining
// characters.
//
// If DiscardEmptyColumns is set, empty columns that are terminated
// entirely by vertical (or "soft") tabs are discarded. Columns
// terminated by horizontal (or "hard") tabs are not affected by
// this flag.
//
// If a Writer is configured to filter HTML, HTML tags and entities
// are passed through. The widths of tags and entities are
// assumed to be zero (tags) and one (entities) for formatting purposes.
//
// A segment of text may be escaped by bracketing it with Escape
// characters. The tabwriter passes escaped text segments through
// unchanged. In particular, it does not interpret any tabs or line
// breaks within the segment. If the StripEscape flag is set, the
// Escape characters are stripped from the output; otherwise they
// are passed through as well. For the purpose of formatting, the
// width of the escaped text is always computed excluding the Escape
// characters.
//
// The formfeed character acts like a newline but it also terminates
// all columns in the current line (effectively calling Flush). Tab-
// terminated cells in the next line start new columns. Unless found
// inside an HTML tag or inside an escaped text segment, formfeed
// characters appear as newlines in the output.
//
// The Writer must buffer input internally, because proper spacing
// of one line may depend on the cells in future lines. Clients must
// call Flush when done calling Write.
type Writer struct {
// configuration
output io.Writer
minwidth int
tabwidth int
padding int
padbytes [8]byte
flags uint
// current state
buf []byte // collected text excluding tabs or line breaks
pos int // buffer position up to which cell.width of incomplete cell has been computed
cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
lines [][]cell // list of lines; each line is a list of cells
widths []int // list of column widths in runes - reused during formatting
}
// addLine adds a new line.
// flushed is a hint indicating whether the underlying writer was just flushed.
// If so, the previous line is not likely to be a good indicator of the new line's cells.
func (b *Writer) addLine(flushed bool) {
// Grow slice instead of appending,
// as that gives us an opportunity
// to reuse an existing []cell.
if n := len(b.lines) + 1; n <= cap(b.lines) {
b.lines = b.lines[:n]
b.lines[n-1] = b.lines[n-1][:0]
} else {
b.lines = append(b.lines, nil)
}
if !flushed {
// The previous line is probably a good indicator
// of how many cells the current line will have.
// If the current line's capacity is smaller than that,
// abandon it and make a new one.
if n := len(b.lines); n >= 2 {
if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
b.lines[n-1] = make([]cell, 0, prev)
}
}
}
}
// Reset the current state.
func (b *Writer) reset() {
b.buf = b.buf[:0]
b.pos = 0
b.cell = cell{}
b.endChar = 0
b.lines = b.lines[0:0]
b.widths = b.widths[0:0]
b.addLine(true)
}
// Internal representation (current state):
//
// - all text written is appended to buf; tabs and line breaks are stripped away
// - at any given time there is a (possibly empty) incomplete cell at the end
// (the cell starts after a tab or line break)
// - cell.size is the number of bytes belonging to the cell so far
// - cell.width is text width in runes of that cell from the start of the cell to
// position pos; html tags and entities are excluded from this width if html
// filtering is enabled
// - the sizes and widths of processed text are kept in the lines list
// which contains a list of cells for each line
// - the widths list is a temporary list with current widths used during
// formatting; it is kept in Writer because it's reused
//
// |<---------- size ---------->|
// | |
// |<- width ->|<- ignored ->| |
// | | | |
// [---processed---tab------------<tag>...</tag>...]
// ^ ^ ^
// | | |
// buf start of incomplete cell pos
// Formatting can be controlled with these flags.
const (
// Ignore html tags and treat entities (starting with '&'
// and ending in ';') as single characters (width = 1).
FilterHTML uint = 1 << iota
// Strip Escape characters bracketing escaped text segments
// instead of passing them through unchanged with the text.
StripEscape
// Force right-alignment of cell content.
// Default is left-alignment.
AlignRight
// Handle empty columns as if they were not present in
// the input in the first place.
DiscardEmptyColumns
// Always use tabs for indentation columns (i.e., padding of
// leading empty cells on the left) independent of padchar.
TabIndent
// Print a vertical bar ('|') between columns (after formatting).
// Discarded columns appear as zero-width columns ("||").
Debug
)
// A Writer must be initialized with a call to Init. The first parameter (output)
// specifies the filter output. The remaining parameters control the formatting:
//
// minwidth minimal cell width including any padding
// tabwidth width of tab characters (equivalent number of spaces)
// padding the padding added to a cell before computing its width
// padchar ASCII char used for padding
// if padchar == '\t', the Writer will assume that the
// width of a '\t' in the formatted output is tabwidth,
// and cells are left-aligned independent of align_left
// (for correct-looking results, tabwidth must correspond
// to the tab width in the viewer displaying the result)
// flags formatting control
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
if minwidth < 0 || tabwidth < 0 || padding < 0 {
panic("negative minwidth, tabwidth, or padding")
}
b.output = output
b.minwidth = minwidth
b.tabwidth = tabwidth
b.padding = padding
for i := range b.padbytes {
b.padbytes[i] = padchar
}
if padchar == '\t' {
// tab padding enforces left-alignment
flags &^= AlignRight
}
b.flags = flags
b.reset()
return b
}
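// Example (illustrative sketch, not part of the upstream file): the Writer is
// driven the same way as text/tabwriter; os and fmt are assumed to be imported
// by the caller, and Flush is defined later in this file:
//
//	w := new(Writer).Init(os.Stdout, 10, 1, 3, ' ', 0)
//	fmt.Fprintln(w, "ID\tIMAGE\tSTATUS")
//	fmt.Fprintln(w, "4fa6e0f0c678\talpine\tUp 2 hours")
//	w.Flush()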
// debugging support (keep code around)
func (b *Writer) dump() {
pos := 0
for i, line := range b.lines {
print("(", i, ") ")
for _, c := range line {
print("[", string(b.buf[pos:pos+c.size]), "]")
pos += c.size
}
print("\n")
}
print("\n")
}
// local error wrapper so we can distinguish errors we want to return
// as errors from genuine panics (which we don't want to return as errors)
type osError struct {
err error
}
func (b *Writer) write0(buf []byte) {
n, err := b.output.Write(buf)
if n != len(buf) && err == nil {
err = io.ErrShortWrite
}
if err != nil {
panic(osError{err})
}
}
func (b *Writer) writeN(src []byte, n int) {
for n > len(src) {
b.write0(src)
n -= len(src)
}
b.write0(src[0:n])
}
var (
newline = []byte{'\n'}
tabs = []byte("\t\t\t\t\t\t\t\t")
)
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
if b.padbytes[0] == '\t' || useTabs {
// padding is done with tabs
if b.tabwidth == 0 {
return // tabs have no width - can't do any padding
}
// make cellw the smallest multiple of b.tabwidth
cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
n := cellw - textw // amount of padding
if n < 0 {
panic("internal error")
}
b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
return
}
// padding is done with non-tab characters
b.writeN(b.padbytes[0:], cellw-textw)
}
var vbar = []byte{'|'}
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
pos = pos0
for i := line0; i < line1; i++ {
line := b.lines[i]
// if TabIndent is set, use tabs to pad leading empty cells
useTabs := b.flags&TabIndent != 0
for j, c := range line {
if j > 0 && b.flags&Debug != 0 {
// indicate column break
b.write0(vbar)
}
if c.size == 0 {
// empty cell
if j < len(b.widths) {
b.writePadding(c.width, b.widths[j], useTabs)
}
} else {
// non-empty cell
useTabs = false
if b.flags&AlignRight == 0 { // align left
b.write0(b.buf[pos : pos+c.size])
pos += c.size
if j < len(b.widths) {
b.writePadding(c.width, b.widths[j], false)
}
} else { // align right
if j < len(b.widths) {
b.writePadding(c.width, b.widths[j], false)
}
b.write0(b.buf[pos : pos+c.size])
pos += c.size
}
}
}
if i+1 == len(b.lines) {
// last buffered line - we don't have a newline, so just write
// any outstanding buffered data
b.write0(b.buf[pos : pos+b.cell.size])
pos += b.cell.size
} else {
// not the last line - write newline
b.write0(newline)
}
}
return
}
// Format the text between line0 and line1 (excluding line1); pos
// is the buffer position corresponding to the beginning of line0.
// Returns the buffer position corresponding to the beginning of
// line1.
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
pos = pos0
column := len(b.widths)
for this := line0; this < line1; this++ {
line := b.lines[this]
if column >= len(line)-1 {
continue
}
// cell exists in this column => this line
// has more cells than the previous line
// (the last cell per line is ignored because cells are
// tab-terminated; the last cell per line describes the
// text before the newline/formfeed and does not belong
// to a column)
// print unprinted lines until beginning of block
pos = b.writeLines(pos, line0, this)
line0 = this
// column block begin
width := b.minwidth // minimal column width
discardable := true // true if all cells in this column are empty and "soft"
for ; this < line1; this++ {
line = b.lines[this]
if column >= len(line)-1 {
break
}
// cell exists in this column
c := line[column]
// update width
if w := c.width + b.padding; w > width {
width = w
}
// update discardable
if c.width > 0 || c.htab {
discardable = false
}
}
// column block end
// discard empty columns if necessary
if discardable && b.flags&DiscardEmptyColumns != 0 {
width = 0
}
// format and print all columns to the right of this column
// (we know the widths of this column and all columns to the left)
b.widths = append(b.widths, width) // push width
pos = b.format(pos, line0, this)
b.widths = b.widths[0 : len(b.widths)-1] // pop width
line0 = this
}
// print unprinted lines until end
return b.writeLines(pos, line0, line1)
}
// Append text to current cell.
func (b *Writer) append(text []byte) {
b.buf = append(b.buf, text...)
b.cell.size += len(text)
}
// Update the cell width.
func (b *Writer) updateWidth() {
b.cell.width += runewidth.StringWidth(string(b.buf[b.pos:]))
b.pos = len(b.buf)
}
// To escape a text segment, bracket it with Escape characters.
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
// does not terminate a cell and constitutes a single character of
// width one for formatting purposes.
//
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
const Escape = '\xff'
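A minimal sketch (not part of the vendored file) of the Escape mechanism described above. It uses the standard library text/tabwriter, whose Escape and StripEscape API this copy mirrors; the sample strings are made up.

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', tabwriter.StripEscape)
	// The tab bracketed by Escape bytes does not terminate the first cell and
	// counts as a single character of width one; with StripEscape the 0xff
	// bytes themselves are removed from the output.
	fmt.Fprint(w, "Ignore this tab: \xff\t\xff\tcol2\n")
	fmt.Fprint(w, "short\tcol2\n")
	w.Flush()
}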
// Start escaped mode.
func (b *Writer) startEscape(ch byte) {
switch ch {
case Escape:
b.endChar = Escape
case '<':
b.endChar = '>'
case '&':
b.endChar = ';'
}
}
// Terminate escaped mode. If the escaped text was an HTML tag, its width
// is assumed to be zero for formatting purposes; if it was an HTML entity,
// its width is assumed to be one. In all other cases, the width is the
// unicode width of the text.
func (b *Writer) endEscape() {
switch b.endChar {
case Escape:
b.updateWidth()
if b.flags&StripEscape == 0 {
b.cell.width -= 2 // don't count the Escape chars
}
case '>': // tag of zero width
case ';':
b.cell.width++ // entity, count as one rune
}
b.pos = len(b.buf)
b.endChar = 0
}
// Terminate the current cell by adding it to the list of cells of the
// current line. Returns the number of cells in that line.
func (b *Writer) terminateCell(htab bool) int {
b.cell.htab = htab
line := &b.lines[len(b.lines)-1]
*line = append(*line, b.cell)
b.cell = cell{}
return len(*line)
}
func (b *Writer) handlePanic(err *error, op string) {
if e := recover(); e != nil {
if op == "Flush" {
// If Flush ran into a panic, we still need to reset.
b.reset()
}
if nerr, ok := e.(osError); ok {
*err = nerr.err
return
}
panic("tabwriter: panic during " + op)
}
}
// Flush should be called after the last call to Write to ensure
// that any data buffered in the Writer is written to output. Any
// incomplete escape sequence at the end is considered
// complete for formatting purposes.
func (b *Writer) Flush() error {
return b.flush()
}
// flush is the internal version of Flush, with a named return value which we
// don't want to expose.
func (b *Writer) flush() (err error) {
defer b.handlePanic(&err, "Flush")
b.flushNoDefers()
return nil
}
// flushNoDefers is like flush, but without a deferred handlePanic call. This
// can be called from other methods which already have their own deferred
// handlePanic calls, such as Write, and avoid the extra defer work.
func (b *Writer) flushNoDefers() {
// add current cell if not empty
if b.cell.size > 0 {
if b.endChar != 0 {
// inside escape - terminate it even if incomplete
b.endEscape()
}
b.terminateCell(false)
}
// format contents of buffer
b.format(0, 0, len(b.lines))
b.reset()
}
var hbar = []byte("---\n")
// Write writes buf to the writer b.
// The only errors returned are ones encountered
// while writing to the underlying output stream.
func (b *Writer) Write(buf []byte) (n int, err error) {
defer b.handlePanic(&err, "Write")
// split text into cells
n = 0
for i, ch := range buf {
if b.endChar == 0 {
// outside escape
switch ch {
case '\t', '\v', '\n', '\f':
// end of cell
b.append(buf[n:i])
b.updateWidth()
n = i + 1 // ch consumed
ncells := b.terminateCell(ch == '\t')
if ch == '\n' || ch == '\f' {
// terminate line
b.addLine(ch == '\f')
if ch == '\f' || ncells == 1 {
// A '\f' always forces a flush. Otherwise, the previous
// line has only one cell, which does not affect the
// formatting of the following lines (the last cell per
// line is ignored by format()), so we can flush the
// Writer contents.
b.flushNoDefers()
if ch == '\f' && b.flags&Debug != 0 {
// indicate section break
b.write0(hbar)
}
}
}
case Escape:
// start of escaped sequence
b.append(buf[n:i])
b.updateWidth()
n = i
if b.flags&StripEscape != 0 {
n++ // strip Escape
}
b.startEscape(Escape)
case '<', '&':
// possibly an html tag/entity
if b.flags&FilterHTML != 0 {
// begin of tag/entity
b.append(buf[n:i])
b.updateWidth()
n = i
b.startEscape(ch)
}
}
} else if ch == b.endChar {
// inside escape
// end of tag/entity
j := i + 1
if ch == Escape && b.flags&StripEscape != 0 {
j = i // strip Escape
}
b.append(buf[n:j])
n = i + 1 // ch consumed
b.endEscape()
}
}
// append leftover text
b.append(buf[n:])
n = len(buf)
return
}
// NewWriter allocates and initializes a new tabwriter.Writer.
// The parameters are the same as for the Init function.
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
}
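For orientation, a small usage sketch (not part of the vendored file) of the Init parameters and flag combinations documented above; it uses the standard library text/tabwriter, which shares this API, and made-up table data.

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// minwidth=0, tabwidth=8, padding=2, padchar=' ',
	// right-aligned cells with a '|' drawn between columns (Debug).
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.AlignRight|tabwriter.Debug)
	fmt.Fprintln(w, "NAME\tREPLICAS\tIMAGE")
	fmt.Fprintln(w, "web\t3/3\tnginx:alpine")
	fmt.Fprintln(w, "db\t1/1\tpostgres:16")
	if err := w.Flush(); err != nil {
		panic(err)
	}
}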


@ -0,0 +1,165 @@
package formatter
import (
"fmt"
"strconv"
"strings"
"github.com/docker/docker/api/types/volume"
units "github.com/docker/go-units"
)
const (
defaultVolumeQuietFormat = "{{.Name}}"
defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}"
idHeader = "ID"
volumeNameHeader = "VOLUME NAME"
mountpointHeader = "MOUNTPOINT"
linksHeader = "LINKS"
groupHeader = "GROUP"
availabilityHeader = "AVAILABILITY"
statusHeader = "STATUS"
)
// NewVolumeFormat returns a format for use with a volume Context
func NewVolumeFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return defaultVolumeQuietFormat
}
return defaultVolumeTableFormat
case RawFormatKey:
if quiet {
return `name: {{.Name}}`
}
return `name: {{.Name}}\ndriver: {{.Driver}}\n`
}
return Format(source)
}
// VolumeWrite writes formatted volumes using the Context
func VolumeWrite(ctx Context, volumes []*volume.Volume) error {
render := func(format func(subContext SubContext) error) error {
for _, vol := range volumes {
if err := format(&volumeContext{v: *vol}); err != nil {
return err
}
}
return nil
}
return ctx.Write(newVolumeContext(), render)
}
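A caller-side sketch (not part of the vendored file) of NewVolumeFormat and VolumeWrite; it assumes this package's import path github.com/docker/cli/cli/command/formatter, the exported Output/Format fields on Context, and made-up volume data.

package main

import (
	"os"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types/volume"
)

func main() {
	vols := []*volume.Volume{
		{Name: "data", Driver: "local", Mountpoint: "/var/lib/docker/volumes/data/_data", Scope: "local"},
	}
	ctx := formatter.Context{
		Output: os.Stdout,
		// quiet=false selects the table format with DRIVER and VOLUME NAME columns.
		Format: formatter.NewVolumeFormat(formatter.TableFormatKey, false),
	}
	if err := formatter.VolumeWrite(ctx, vols); err != nil {
		panic(err)
	}
}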
type volumeContext struct {
HeaderContext
v volume.Volume
}
func newVolumeContext() *volumeContext {
volumeCtx := volumeContext{}
volumeCtx.Header = SubHeaderContext{
"ID": idHeader,
"Name": volumeNameHeader,
"Group": groupHeader,
"Driver": DriverHeader,
"Scope": ScopeHeader,
"Availability": availabilityHeader,
"Mountpoint": mountpointHeader,
"Labels": LabelsHeader,
"Links": linksHeader,
"Size": SizeHeader,
"Status": statusHeader,
}
return &volumeCtx
}
func (c *volumeContext) MarshalJSON() ([]byte, error) {
return MarshalJSON(c)
}
func (c *volumeContext) Name() string {
return c.v.Name
}
func (c *volumeContext) Driver() string {
return c.v.Driver
}
func (c *volumeContext) Scope() string {
return c.v.Scope
}
func (c *volumeContext) Mountpoint() string {
return c.v.Mountpoint
}
func (c *volumeContext) Labels() string {
if c.v.Labels == nil {
return ""
}
joinLabels := make([]string, 0, len(c.v.Labels))
for k, v := range c.v.Labels {
joinLabels = append(joinLabels, k+"="+v)
}
return strings.Join(joinLabels, ",")
}
func (c *volumeContext) Label(name string) string {
if c.v.Labels == nil {
return ""
}
return c.v.Labels[name]
}
func (c *volumeContext) Links() string {
if c.v.UsageData == nil {
return "N/A"
}
return strconv.FormatInt(c.v.UsageData.RefCount, 10)
}
func (c *volumeContext) Size() string {
if c.v.UsageData == nil {
return "N/A"
}
return units.HumanSize(float64(c.v.UsageData.Size))
}
func (c *volumeContext) Group() string {
if c.v.ClusterVolume == nil {
return "N/A"
}
return c.v.ClusterVolume.Spec.Group
}
func (c *volumeContext) Availability() string {
if c.v.ClusterVolume == nil {
return "N/A"
}
return string(c.v.ClusterVolume.Spec.Availability)
}
func (c *volumeContext) Status() string {
if c.v.ClusterVolume == nil {
return "N/A"
}
if c.v.ClusterVolume.Info == nil || c.v.ClusterVolume.Info.VolumeID == "" {
return "pending creation"
}
l := len(c.v.ClusterVolume.PublishStatus)
switch l {
case 0:
return "created"
case 1:
return "in use (1 node)"
default:
return fmt.Sprintf("in use (%d nodes)", l)
}
}

216
vendor/github.com/docker/cli/cli/command/registry.go generated vendored Normal file

@ -0,0 +1,216 @@
package command
import (
"bufio"
"context"
"fmt"
"io"
"os"
"runtime"
"strings"
"github.com/distribution/reference"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
configtypes "github.com/docker/cli/cli/config/types"
"github.com/docker/cli/cli/hints"
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/registry"
"github.com/moby/term"
"github.com/pkg/errors"
)
const patSuggest = "You can log in with your password or a Personal Access " +
"Token (PAT). Using a limited-scope PAT grants better security and is required " +
"for organizations using SSO. Learn more at https://docs.docker.com/go/access-tokens/"
// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info
// for the given command.
func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc {
return func(ctx context.Context) (string, error) {
fmt.Fprintf(cli.Out(), "\nLogin prior to %s:\n", cmdName)
indexServer := registry.GetAuthConfigKey(index)
isDefaultRegistry := indexServer == registry.IndexServer
authConfig, err := GetDefaultAuthConfig(cli.ConfigFile(), true, indexServer, isDefaultRegistry)
if err != nil {
fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
}
select {
case <-ctx.Done():
return "", ctx.Err()
default:
}
err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry)
if err != nil {
return "", err
}
return registrytypes.EncodeAuthConfig(authConfig)
}
}
// ResolveAuthConfig returns auth-config for the given registry from the
// credential-store. It returns an empty AuthConfig if no credentials were
// found.
//
// It is similar to [registry.ResolveAuthConfig], but uses the credentials-
// store, instead of looking up credentials from a map.
func ResolveAuthConfig(cfg *configfile.ConfigFile, index *registrytypes.IndexInfo) registrytypes.AuthConfig {
configKey := index.Name
if index.Official {
configKey = registry.IndexServer
}
a, _ := cfg.GetAuthConfig(configKey)
return registrytypes.AuthConfig(a)
}
// GetDefaultAuthConfig gets the default auth config given a serverAddress
// If credentials for the given serverAddress exist in the credential store, the returned configuration is populated with those values
func GetDefaultAuthConfig(cfg *configfile.ConfigFile, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (registrytypes.AuthConfig, error) {
if !isDefaultRegistry {
serverAddress = credentials.ConvertToHostname(serverAddress)
}
authconfig := configtypes.AuthConfig{}
var err error
if checkCredStore {
authconfig, err = cfg.GetAuthConfig(serverAddress)
if err != nil {
return registrytypes.AuthConfig{
ServerAddress: serverAddress,
}, err
}
}
authconfig.ServerAddress = serverAddress
authconfig.IdentityToken = ""
return registrytypes.AuthConfig(authconfig), nil
}
// ConfigureAuth handles prompting of user's username and password if needed
func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *registrytypes.AuthConfig, isDefaultRegistry bool) error {
// On Windows, force the use of the regular OS stdin stream.
//
// See:
// - https://github.com/moby/moby/issues/14336
// - https://github.com/moby/moby/issues/14210
// - https://github.com/moby/moby/pull/17738
//
// TODO(thaJeztah): we need to confirm if this special handling is still needed, as we may not be doing this in other places.
if runtime.GOOS == "windows" {
cli.SetIn(streams.NewIn(os.Stdin))
}
// Some links documenting this:
// - https://code.google.com/archive/p/mintty/issues/56
// - https://github.com/docker/docker/issues/15272
// - https://mintty.github.io/ (compatibility)
// Linux will hit this if you attempt `cat | docker login`, and Windows
// will hit this if you attempt docker login from mintty where stdin
// is a pipe, not a character based console.
if flPassword == "" && !cli.In().IsTerminal() {
return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
}
authconfig.Username = strings.TrimSpace(authconfig.Username)
if flUser = strings.TrimSpace(flUser); flUser == "" {
if isDefaultRegistry {
// if this is a default registry (docker hub), then display the following message.
fmt.Fprintln(cli.Out(), "Log in with your Docker ID or email address to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com/ to create one.")
if hints.Enabled() {
fmt.Fprintln(cli.Out(), patSuggest)
fmt.Fprintln(cli.Out())
}
}
promptWithDefault(cli.Out(), "Username", authconfig.Username)
var err error
flUser, err = readInput(cli.In())
if err != nil {
return err
}
if flUser == "" {
flUser = authconfig.Username
}
}
if flUser == "" {
return errors.Errorf("Error: Non-null Username Required")
}
if flPassword == "" {
oldState, err := term.SaveState(cli.In().FD())
if err != nil {
return err
}
fmt.Fprintf(cli.Out(), "Password: ")
_ = term.DisableEcho(cli.In().FD(), oldState)
defer func() {
_ = term.RestoreTerminal(cli.In().FD(), oldState)
}()
flPassword, err = readInput(cli.In())
if err != nil {
return err
}
fmt.Fprint(cli.Out(), "\n")
if flPassword == "" {
return errors.Errorf("Error: Password Required")
}
}
authconfig.Username = flUser
authconfig.Password = flPassword
return nil
}
// readInput reads, and returns user input from in. It tries to return a
// single line, not including the end-of-line bytes, and trims leading
// and trailing whitespace.
func readInput(in io.Reader) (string, error) {
line, _, err := bufio.NewReader(in).ReadLine()
if err != nil {
return "", errors.Wrap(err, "error while reading input")
}
return strings.TrimSpace(string(line)), nil
}
func promptWithDefault(out io.Writer, prompt string, configDefault string) {
if configDefault == "" {
fmt.Fprintf(out, "%s: ", prompt)
} else {
fmt.Fprintf(out, "%s (%s): ", prompt, configDefault)
}
}
// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete
// image. The auth configuration is serialized as a base64url-encoded
// (RFC 4648, section 5) JSON string for sending through the X-Registry-Auth header.
//
// For details on base64url encoding, see:
// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
func RetrieveAuthTokenFromImage(cfg *configfile.ConfigFile, image string) (string, error) {
// Retrieve encoded auth token from the image reference
authConfig, err := resolveAuthConfigFromImage(cfg, image)
if err != nil {
return "", err
}
encodedAuth, err := registrytypes.EncodeAuthConfig(authConfig)
if err != nil {
return "", err
}
return encodedAuth, nil
}
// resolveAuthConfigFromImage retrieves the AuthConfig for the registry referenced by the image string
func resolveAuthConfigFromImage(cfg *configfile.ConfigFile, image string) (registrytypes.AuthConfig, error) {
registryRef, err := reference.ParseNormalizedNamed(image)
if err != nil {
return registrytypes.AuthConfig{}, err
}
repoInfo, err := registry.ParseRepositoryInfo(registryRef)
if err != nil {
return registrytypes.AuthConfig{}, err
}
return ResolveAuthConfig(cfg, repoInfo.Index), nil
}
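A caller-side sketch (not part of the vendored file) of RetrieveAuthTokenFromImage; it assumes the github.com/docker/cli/cli/config package's LoadDefaultConfigFile helper, and the image reference is a placeholder.

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/config"
)

func main() {
	cfg := config.LoadDefaultConfigFile(os.Stderr)
	// "registry.example.com/team/app:1.0" is a placeholder image reference.
	encodedAuth, err := command.RetrieveAuthTokenFromImage(cfg, "registry.example.com/team/app:1.0")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// The returned string is what goes into the X-Registry-Auth header.
	fmt.Println("encoded auth length:", len(encodedAuth))
}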


@ -0,0 +1,844 @@
package progress
import (
"context"
"errors"
"fmt"
"io"
"os"
"os/signal"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
)
var (
numberedStates = map[swarm.TaskState]int64{
swarm.TaskStateNew: 1,
swarm.TaskStateAllocated: 2,
swarm.TaskStatePending: 3,
swarm.TaskStateAssigned: 4,
swarm.TaskStateAccepted: 5,
swarm.TaskStatePreparing: 6,
swarm.TaskStateReady: 7,
swarm.TaskStateStarting: 8,
swarm.TaskStateRunning: 9,
// The following states are not actually shown in progress
// output, but are used internally for ordering.
swarm.TaskStateComplete: 10,
swarm.TaskStateShutdown: 11,
swarm.TaskStateFailed: 12,
swarm.TaskStateRejected: 13,
}
longestState int
)
const (
maxProgress = 9
maxProgressBars = 20
maxJobProgress = 10
)
type progressUpdater interface {
update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error)
}
func init() {
for state := range numberedStates {
// for jobs, we use the "complete" state, and so it should be factored
// into the computation of the longest state.
if (!terminalState(state) || state == swarm.TaskStateComplete) && len(state) > longestState {
longestState = len(state)
}
}
}
func terminalState(state swarm.TaskState) bool {
return numberedStates[state] > numberedStates[swarm.TaskStateRunning]
}
// ServiceProgress outputs progress information for convergence of a service.
//
//nolint:gocyclo
func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID string, progressWriter io.WriteCloser) error {
defer progressWriter.Close()
progressOut := streamformatter.NewJSONProgressOutput(progressWriter, false)
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, os.Interrupt)
defer signal.Stop(sigint)
taskFilter := filters.NewArgs()
taskFilter.Add("service", serviceID)
taskFilter.Add("_up-to-date", "true")
getUpToDateTasks := func() ([]swarm.Task, error) {
return apiClient.TaskList(ctx, types.TaskListOptions{Filters: taskFilter})
}
var (
updater progressUpdater
converged bool
convergedAt time.Time
monitor = 5 * time.Second
rollback bool
message *progress.Progress
)
for {
service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
if err != nil {
return err
}
if service.Spec.UpdateConfig != nil && service.Spec.UpdateConfig.Monitor != 0 {
monitor = service.Spec.UpdateConfig.Monitor
}
if updater == nil {
updater, err = initializeUpdater(service, progressOut)
if err != nil {
return err
}
}
if service.UpdateStatus != nil {
switch service.UpdateStatus.State {
case swarm.UpdateStateUpdating:
rollback = false
case swarm.UpdateStateCompleted:
if !converged {
return nil
}
case swarm.UpdateStatePaused:
return fmt.Errorf("service update paused: %s", service.UpdateStatus.Message)
case swarm.UpdateStateRollbackStarted:
if !rollback && service.UpdateStatus.Message != "" {
progressOut.WriteProgress(progress.Progress{
ID: "rollback",
Action: service.UpdateStatus.Message,
})
}
rollback = true
case swarm.UpdateStateRollbackPaused:
return fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message)
case swarm.UpdateStateRollbackCompleted:
if !converged {
message = &progress.Progress{ID: "rollback", Message: service.UpdateStatus.Message}
}
rollback = true
}
}
if converged && time.Since(convergedAt) >= monitor {
progressOut.WriteProgress(progress.Progress{
ID: "verify",
Action: fmt.Sprintf("Service %s converged", serviceID),
})
if message != nil {
progressOut.WriteProgress(*message)
}
return nil
}
tasks, err := getUpToDateTasks()
if err != nil {
return err
}
activeNodes, err := getActiveNodes(ctx, apiClient)
if err != nil {
return err
}
converged, err = updater.update(service, tasks, activeNodes, rollback)
if err != nil {
return err
}
if converged {
// if the service is a job, there's no need to verify it. jobs
// stay done once they're done. skip the verification and just end
// the progress monitoring.
//
// only job services have a non-nil job status, which means we can
// use the presence of this field to check if the service is a job
// here.
if service.JobStatus != nil {
progress.Message(progressOut, "", "job complete")
return nil
}
if convergedAt.IsZero() {
convergedAt = time.Now()
}
wait := monitor - time.Since(convergedAt)
if wait >= 0 {
progressOut.WriteProgress(progress.Progress{
// Ideally this would have no ID, but
// the progress rendering code behaves
// poorly on an "action" with no ID. It
// returns the cursor to the beginning
// of the line, so the first character
// may be difficult to read. Then the
// output is overwritten by the shell
// prompt when the command finishes.
ID: "verify",
Action: fmt.Sprintf("Waiting %d seconds to verify that tasks are stable...", wait/time.Second+1),
})
}
} else {
if !convergedAt.IsZero() {
progressOut.WriteProgress(progress.Progress{
ID: "verify",
Action: "Detected task failure",
})
}
convergedAt = time.Time{}
}
select {
case <-time.After(200 * time.Millisecond):
case <-sigint:
if !converged {
progress.Message(progressOut, "", "Operation continuing in background.")
progress.Messagef(progressOut, "", "Use `docker service ps %s` to check progress.", serviceID)
}
return nil
}
}
}
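A minimal caller sketch (not part of the vendored file) of ServiceProgress; the import path for this package and the service ID are assumptions, and os.Stdout is used as the io.WriteCloser (ServiceProgress closes it on return).

package main

import (
	"context"
	"os"

	"github.com/docker/cli/cli/command/service/progress"
	"github.com/docker/docker/client"
)

func main() {
	apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer apiClient.Close()

	// "my_service" is a placeholder service ID or name.
	if err := progress.ServiceProgress(context.Background(), apiClient, "my_service", os.Stdout); err != nil {
		panic(err)
	}
}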
func getActiveNodes(ctx context.Context, apiClient client.APIClient) (map[string]struct{}, error) {
nodes, err := apiClient.NodeList(ctx, types.NodeListOptions{})
if err != nil {
return nil, err
}
activeNodes := make(map[string]struct{})
for _, n := range nodes {
if n.Status.State != swarm.NodeStateDown {
activeNodes[n.ID] = struct{}{}
}
}
return activeNodes, nil
}
func initializeUpdater(service swarm.Service, progressOut progress.Output) (progressUpdater, error) {
if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
return &replicatedProgressUpdater{
progressOut: progressOut,
}, nil
}
if service.Spec.Mode.Global != nil {
return &globalProgressUpdater{
progressOut: progressOut,
}, nil
}
if service.Spec.Mode.ReplicatedJob != nil {
return newReplicatedJobProgressUpdater(service, progressOut), nil
}
if service.Spec.Mode.GlobalJob != nil {
return &globalJobProgressUpdater{
progressOut: progressOut,
}, nil
}
return nil, errors.New("unrecognized service mode")
}
func writeOverallProgress(progressOut progress.Output, numerator, denominator int, rollback bool) {
if rollback {
progressOut.WriteProgress(progress.Progress{
ID: "overall progress",
Action: fmt.Sprintf("rolling back update: %d out of %d tasks", numerator, denominator),
})
return
}
progressOut.WriteProgress(progress.Progress{
ID: "overall progress",
Action: fmt.Sprintf("%d out of %d tasks", numerator, denominator),
})
}
func truncError(errMsg string) string {
// Remove newlines from the error, which corrupt the output.
errMsg = strings.ReplaceAll(errMsg, "\n", " ")
// Limit the length to 75 characters, so that even on narrow terminals
// this will not overflow to the next line.
if len(errMsg) > 75 {
errMsg = errMsg[:74] + "…"
}
return errMsg
}
type replicatedProgressUpdater struct {
progressOut progress.Output
// used for mapping slots to a contiguous space
// this also causes progress bars to appear in order
slotMap map[int]int
initialized bool
done bool
}
func (u *replicatedProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) {
if service.Spec.Mode.Replicated == nil || service.Spec.Mode.Replicated.Replicas == nil {
return false, errors.New("no replica count")
}
replicas := *service.Spec.Mode.Replicated.Replicas
if !u.initialized {
u.slotMap = make(map[int]int)
// Draw progress bars in order
writeOverallProgress(u.progressOut, 0, int(replicas), rollback)
if replicas <= maxProgressBars {
for i := uint64(1); i <= replicas; i++ {
progress.Update(u.progressOut, fmt.Sprintf("%d/%d", i, replicas), " ")
}
}
u.initialized = true
}
tasksBySlot := u.tasksBySlot(tasks, activeNodes)
// If we had reached a converged state, check if we are still converged.
if u.done {
for _, task := range tasksBySlot {
if task.Status.State != swarm.TaskStateRunning {
u.done = false
break
}
}
}
running := uint64(0)
for _, task := range tasksBySlot {
mappedSlot := u.slotMap[task.Slot]
if mappedSlot == 0 {
mappedSlot = len(u.slotMap) + 1
u.slotMap[task.Slot] = mappedSlot
}
if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning {
running++
}
u.writeTaskProgress(task, mappedSlot, replicas)
}
if !u.done {
writeOverallProgress(u.progressOut, int(running), int(replicas), rollback)
if running == replicas {
u.done = true
}
}
return running == replicas, nil
}
func (u *replicatedProgressUpdater) tasksBySlot(tasks []swarm.Task, activeNodes map[string]struct{}) map[int]swarm.Task {
// If there are multiple tasks with the same slot number, favor the one
// with the *lowest* desired state. This can happen in restart
// scenarios.
tasksBySlot := make(map[int]swarm.Task)
for _, task := range tasks {
if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 {
continue
}
if existingTask, ok := tasksBySlot[task.Slot]; ok {
if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] {
continue
}
// If the desired states match, observed state breaks
// ties. This can happen with the "start first" service
// update mode.
if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] &&
numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] {
continue
}
}
if task.NodeID != "" {
if _, nodeActive := activeNodes[task.NodeID]; !nodeActive {
continue
}
}
tasksBySlot[task.Slot] = task
}
return tasksBySlot
}
func (u *replicatedProgressUpdater) writeTaskProgress(task swarm.Task, mappedSlot int, replicas uint64) {
if u.done || replicas > maxProgressBars || uint64(mappedSlot) > replicas {
return
}
if task.Status.Err != "" {
u.progressOut.WriteProgress(progress.Progress{
ID: fmt.Sprintf("%d/%d", mappedSlot, replicas),
Action: truncError(task.Status.Err),
})
return
}
if !terminalState(task.DesiredState) && !terminalState(task.Status.State) {
u.progressOut.WriteProgress(progress.Progress{
ID: fmt.Sprintf("%d/%d", mappedSlot, replicas),
Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State),
Current: numberedStates[task.Status.State],
Total: maxProgress,
HideCounts: true,
})
}
}
type globalProgressUpdater struct {
progressOut progress.Output
initialized bool
done bool
}
func (u *globalProgressUpdater) update(_ swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) {
tasksByNode := u.tasksByNode(tasks)
// We don't have perfect knowledge of how many nodes meet the
// constraints for this service. But the orchestrator creates tasks
// for all eligible nodes at the same time, so we should see all those
// nodes represented among the up-to-date tasks.
nodeCount := len(tasksByNode)
if !u.initialized {
if nodeCount == 0 {
// Two possibilities: either the orchestrator hasn't created
// the tasks yet, or the service doesn't meet constraints for
// any node. Either way, we wait.
u.progressOut.WriteProgress(progress.Progress{
ID: "overall progress",
Action: "waiting for new tasks",
})
return false, nil
}
writeOverallProgress(u.progressOut, 0, nodeCount, rollback)
u.initialized = true
}
// If we had reached a converged state, check if we are still converged.
if u.done {
for _, task := range tasksByNode {
if task.Status.State != swarm.TaskStateRunning {
u.done = false
break
}
}
}
running := 0
for _, task := range tasksByNode {
if _, nodeActive := activeNodes[task.NodeID]; nodeActive {
if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning {
running++
}
u.writeTaskProgress(task, nodeCount)
}
}
if !u.done {
writeOverallProgress(u.progressOut, running, nodeCount, rollback)
if running == nodeCount {
u.done = true
}
}
return running == nodeCount, nil
}
func (u *globalProgressUpdater) tasksByNode(tasks []swarm.Task) map[string]swarm.Task {
// If there are multiple tasks with the same node ID, favor the one
// with the *lowest* desired state. This can happen in restart
// scenarios.
tasksByNode := make(map[string]swarm.Task)
for _, task := range tasks {
if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 {
continue
}
if existingTask, ok := tasksByNode[task.NodeID]; ok {
if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] {
continue
}
// If the desired states match, observed state breaks
// ties. This can happen with the "start first" service
// update mode.
if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] &&
numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] {
continue
}
}
tasksByNode[task.NodeID] = task
}
return tasksByNode
}
func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int) {
if u.done || nodeCount > maxProgressBars {
return
}
if task.Status.Err != "" {
u.progressOut.WriteProgress(progress.Progress{
ID: stringid.TruncateID(task.NodeID),
Action: truncError(task.Status.Err),
})
return
}
if !terminalState(task.DesiredState) && !terminalState(task.Status.State) {
u.progressOut.WriteProgress(progress.Progress{
ID: stringid.TruncateID(task.NodeID),
Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State),
Current: numberedStates[task.Status.State],
Total: maxProgress,
HideCounts: true,
})
}
}
// replicatedJobProgressUpdater outputs the progress of a replicated job. This
// progress consists of a few main elements.
//
// The first is the progress bar for the job as a whole. This shows the number
// of completed out of total tasks for the job. Tasks that are currently
// running are not counted.
//
// The second is the status of the "active" tasks for the job. We count a task
// as "active" if it has any non-terminal state, not just running. This is
// shown as a fraction of the maximum concurrent tasks that can be running,
// which is the lesser of MaxConcurrent and (TotalCompletions - completed tasks).
type replicatedJobProgressUpdater struct {
progressOut progress.Output
// jobIteration is the service's job iteration, used to exclude tasks
// belonging to earlier iterations.
jobIteration uint64
// concurrent is the value of MaxConcurrent as an int. That is, the maximum
// number of tasks allowed to be run simultaneously.
concurrent int
// total is the value of TotalCompletions, the number of complete tasks
// desired.
total int
// initialized is set to true after the first time update is called. on
// that first call, the components of the progress UI are all written out
// in an initial pass. this ensures that they will subsequently be in
// order, no matter how they are updated.
initialized bool
// progressDigits is the number of digits in total, so that we know how much
// to pad the job progress field with.
//
// when we're writing the number of completed over total tasks, we need to
// pad the numerator with spaces, so that the bar doesn't jump around.
// we'll compute that once on init, and then reuse it over and over.
//
// we compute this in the least clever way possible: convert to string
// with strconv.Itoa, then take the len.
progressDigits int
// activeDigits is the same, but for active tasks, and it applies to both
// the numerator and denominator.
activeDigits int
}
func newReplicatedJobProgressUpdater(service swarm.Service, progressOut progress.Output) *replicatedJobProgressUpdater {
u := &replicatedJobProgressUpdater{
progressOut: progressOut,
concurrent: int(*service.Spec.Mode.ReplicatedJob.MaxConcurrent),
total: int(*service.Spec.Mode.ReplicatedJob.TotalCompletions),
jobIteration: service.JobStatus.JobIteration.Index,
}
u.progressDigits = len(strconv.Itoa(u.total))
u.activeDigits = len(strconv.Itoa(u.concurrent))
return u
}
// update writes out the progress of the replicated job.
func (u *replicatedJobProgressUpdater) update(_ swarm.Service, tasks []swarm.Task, _ map[string]struct{}, _ bool) (bool, error) {
if !u.initialized {
u.writeOverallProgress(0, 0)
// only write out progress bars if there will be less than the maximum
if u.total <= maxProgressBars {
for i := 1; i <= u.total; i++ {
u.progressOut.WriteProgress(progress.Progress{
ID: fmt.Sprintf("%d/%d", i, u.total),
Action: " ",
})
}
}
u.initialized = true
}
// tasksBySlot is a mapping of slot number to the task valid for that slot.
// it deduplicates tasks occupying the same numerical slot but in different
// states.
tasksBySlot := make(map[int]swarm.Task)
for _, task := range tasks {
// first, check if the task belongs to this service iteration. skip
// tasks belonging to other iterations.
if task.JobIteration == nil || task.JobIteration.Index != u.jobIteration {
continue
}
// then, if the task is in an unknown state, ignore it.
if numberedStates[task.DesiredState] == 0 ||
numberedStates[task.Status.State] == 0 {
continue
}
// finally, check if the task already exists in the map
if existing, ok := tasksBySlot[task.Slot]; ok {
// if so, use the task with the lower actual state
if numberedStates[existing.Status.State] > numberedStates[task.Status.State] {
tasksBySlot[task.Slot] = task
}
} else {
// otherwise, just add it to the map.
tasksBySlot[task.Slot] = task
}
}
activeTasks := 0
completeTasks := 0
for i := 0; i < len(tasksBySlot); i++ {
task := tasksBySlot[i]
u.writeTaskProgress(task)
if numberedStates[task.Status.State] < numberedStates[swarm.TaskStateComplete] {
activeTasks++
}
if task.Status.State == swarm.TaskStateComplete {
completeTasks++
}
}
u.writeOverallProgress(activeTasks, completeTasks)
return completeTasks == u.total, nil
}
func (u *replicatedJobProgressUpdater) writeOverallProgress(active, completed int) {
u.progressOut.WriteProgress(progress.Progress{
ID: "job progress",
Action: fmt.Sprintf(
// * means "use the next positional arg to compute padding"
"%*d out of %d complete", u.progressDigits, completed, u.total,
),
Current: int64(completed),
Total: int64(u.total),
HideCounts: true,
})
// actualDesired is the lesser of MaxConcurrent and the remaining tasks
actualDesired := u.total - completed
if actualDesired > u.concurrent {
actualDesired = u.concurrent
}
u.progressOut.WriteProgress(progress.Progress{
ID: "active tasks",
Action: fmt.Sprintf(
// [n] notation lets us select a specific argument, 1-indexed
// putting the [1] before the star means "make the string this
// length". putting the [2] or the [3] means "use this argument
// here"
//
// we pad both the numerator and the denominator because, as the
// job reaches its conclusion, the number of possible concurrent
// tasks will go down, as fewer than MaxConcurrent tasks are needed
// to complete the job.
"%[1]*[2]d out of %[1]*[3]d tasks", u.activeDigits, active, actualDesired,
),
})
}
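A standalone sketch (not part of the vendored file) of the fmt verbs used in the comments above, showing how the width argument keeps the counters from jumping around; the numbers are made up.

package main

import "fmt"

func main() {
	// "%*d": the width is taken from the preceding argument (here 3).
	fmt.Printf("%*d out of %d complete\n", 3, 7, 120) // "  7 out of 120 complete"

	// "%[1]*[2]d ... %[1]*[3]d": argument 1 supplies the width for both the
	// numerator (argument 2) and the denominator (argument 3).
	fmt.Printf("%[1]*[2]d out of %[1]*[3]d tasks\n", 2, 4, 10) // " 4 out of 10 tasks"
}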
func (u *replicatedJobProgressUpdater) writeTaskProgress(task swarm.Task) {
if u.total > maxProgressBars {
return
}
if task.Status.Err != "" {
u.progressOut.WriteProgress(progress.Progress{
ID: fmt.Sprintf("%d/%d", task.Slot+1, u.total),
Action: truncError(task.Status.Err),
})
return
}
u.progressOut.WriteProgress(progress.Progress{
ID: fmt.Sprintf("%d/%d", task.Slot+1, u.total),
Action: fmt.Sprintf("%-*s", longestState, task.Status.State),
Current: numberedStates[task.Status.State],
Total: maxJobProgress,
HideCounts: true,
})
}
// globalJobProgressUpdater is the progressUpdater for GlobalJob-mode services.
// Because GlobalJob services are so much simpler than ReplicatedJob services,
// this updater is in turn simpler as well.
type globalJobProgressUpdater struct {
progressOut progress.Output
// initialized is used to detect the first pass of update, and to perform
// first time initialization logic at that time.
initialized bool
// total is the total number of tasks expected for this job
total int
// progressDigits is the number of spaces to pad the numerator of the job
// progress field
progressDigits int
taskNodes map[string]struct{}
}
func (u *globalJobProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, _ bool) (bool, error) {
if !u.initialized {
// if there are no tasks yet, return early.
if len(tasks) == 0 && len(activeNodes) != 0 {
u.progressOut.WriteProgress(progress.Progress{
ID: "job progress",
Action: "waiting for tasks",
})
return false, nil
}
// when a global job starts, all of its tasks are created at once, so
// we can use len(tasks) to know how many we're expecting.
u.taskNodes = map[string]struct{}{}
for _, task := range tasks {
// skip any tasks not belonging to this job iteration.
if task.JobIteration == nil || task.JobIteration.Index != service.JobStatus.JobIteration.Index {
continue
}
// collect the list of all node IDs for this service.
//
// basically, global jobs will execute on any new nodes that join
// the cluster in the future. to avoid making things complicated,
// we will only check the progress of the initial set of nodes. if
// any new nodes come online during the operation, we will ignore
// them.
u.taskNodes[task.NodeID] = struct{}{}
}
u.total = len(u.taskNodes)
u.progressDigits = len(strconv.Itoa(u.total))
u.writeOverallProgress(0)
u.initialized = true
}
// tasksByNodeID maps a NodeID to the latest task for that Node ID. this
// lets us pick only the latest task for any given node.
tasksByNodeID := map[string]swarm.Task{}
for _, task := range tasks {
// skip any tasks not belonging to this job iteration
if task.JobIteration == nil || task.JobIteration.Index != service.JobStatus.JobIteration.Index {
continue
}
// if the task is not on one of the initial set of nodes, ignore it.
if _, ok := u.taskNodes[task.NodeID]; !ok {
continue
}
// if there is already a task recorded for this node, choose the one
// with the lower state
if oldtask, ok := tasksByNodeID[task.NodeID]; ok {
if numberedStates[oldtask.Status.State] > numberedStates[task.Status.State] {
tasksByNodeID[task.NodeID] = task
}
} else {
tasksByNodeID[task.NodeID] = task
}
}
complete := 0
for _, task := range tasksByNodeID {
u.writeTaskProgress(task)
if task.Status.State == swarm.TaskStateComplete {
complete++
}
}
u.writeOverallProgress(complete)
return complete == u.total, nil
}
func (u *globalJobProgressUpdater) writeTaskProgress(task swarm.Task) {
if u.total > maxProgressBars {
return
}
if task.Status.Err != "" {
u.progressOut.WriteProgress(progress.Progress{
ID: task.NodeID,
Action: truncError(task.Status.Err),
})
return
}
u.progressOut.WriteProgress(progress.Progress{
ID: task.NodeID,
Action: fmt.Sprintf("%-*s", longestState, task.Status.State),
Current: numberedStates[task.Status.State],
Total: maxJobProgress,
HideCounts: true,
})
}
func (u *globalJobProgressUpdater) writeOverallProgress(complete int) {
// all tasks for a global job are active at once, so we only write out the
// total progress.
u.progressOut.WriteProgress(progress.Progress{
// see (*replicatedJobProgressUpdater).writeOverallProgress for an
// explanation of the advanced fmt use in this function.
ID: "job progress",
Action: fmt.Sprintf(
"%*d out of %d complete", u.progressDigits, complete, u.total,
),
Current: int64(complete),
Total: int64(u.total),
HideCounts: true,
})
}


@ -0,0 +1,70 @@
package formatter
import (
"strconv"
"github.com/docker/cli/cli/command/formatter"
)
const (
// SwarmStackTableFormat is the default Swarm stack format
SwarmStackTableFormat formatter.Format = "table {{.Name}}\t{{.Services}}"
stackServicesHeader = "SERVICES"
// TableFormatKey is an alias for formatter.TableFormatKey
TableFormatKey = formatter.TableFormatKey
)
// Context is an alias for formatter.Context
type Context = formatter.Context
// Format is an alias for formatter.Format
type Format = formatter.Format
// Stack contains deployed stack information.
type Stack struct {
// Name is the name of the stack
Name string
// Services is the number of the services
Services int
}
// StackWrite writes formatted stacks using the Context
func StackWrite(ctx formatter.Context, stacks []*Stack) error {
render := func(format func(subContext formatter.SubContext) error) error {
for _, stack := range stacks {
if err := format(&stackContext{s: stack}); err != nil {
return err
}
}
return nil
}
return ctx.Write(newStackContext(), render)
}
type stackContext struct {
formatter.HeaderContext
s *Stack
}
func newStackContext() *stackContext {
stackCtx := stackContext{}
stackCtx.Header = formatter.SubHeaderContext{
"Name": formatter.NameHeader,
"Services": stackServicesHeader,
}
return &stackCtx
}
func (s *stackContext) MarshalJSON() ([]byte, error) {
return formatter.MarshalJSON(s)
}
func (s *stackContext) Name() string {
return s.s.Name
}
func (s *stackContext) Services() string {
return strconv.Itoa(s.s.Services)
}
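A caller-side sketch (not part of the vendored file) of StackWrite with the default table format; the import path github.com/docker/cli/cli/command/stack/formatter and the stack data are assumptions.

package main

import (
	"os"

	stackformatter "github.com/docker/cli/cli/command/stack/formatter"
)

func main() {
	stacks := []*stackformatter.Stack{
		{Name: "web", Services: 3},
		{Name: "monitoring", Services: 5},
	}
	ctx := stackformatter.Context{
		Output: os.Stdout,
		Format: stackformatter.SwarmStackTableFormat,
	}
	if err := stackformatter.StackWrite(ctx, stacks); err != nil {
		panic(err)
	}
}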

218
vendor/github.com/docker/cli/cli/command/telemetry.go generated vendored Normal file

@ -0,0 +1,218 @@
package command
import (
"context"
"os"
"path/filepath"
"sync"
"time"
"github.com/docker/distribution/uuid"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"go.opentelemetry.io/otel/trace"
)
const exportTimeout = 50 * time.Millisecond
// TracerProvider is an extension of the trace.TracerProvider interface for CLI programs.
type TracerProvider interface {
trace.TracerProvider
ForceFlush(ctx context.Context) error
Shutdown(ctx context.Context) error
}
// MeterProvider is an extension of the metric.MeterProvider interface for CLI programs.
type MeterProvider interface {
metric.MeterProvider
ForceFlush(ctx context.Context) error
Shutdown(ctx context.Context) error
}
// TelemetryClient provides the methods for using OTEL tracing or metrics.
type TelemetryClient interface {
// Resource returns the OTEL Resource configured with this TelemetryClient.
// This resource may be created lazily, but the resource should be the same
// each time this function is invoked.
Resource() *resource.Resource
// TracerProvider returns the currently initialized TracerProvider. This TracerProvider will be configured
// with the default tracing components for a CLI program
TracerProvider() trace.TracerProvider
// MeterProvider returns the currently initialized MeterProvider. This MeterProvider will be configured
// with the default metric components for a CLI program
MeterProvider() metric.MeterProvider
}
func (cli *DockerCli) Resource() *resource.Resource {
return cli.res.Get()
}
func (cli *DockerCli) TracerProvider() trace.TracerProvider {
return otel.GetTracerProvider()
}
func (cli *DockerCli) MeterProvider() metric.MeterProvider {
return otel.GetMeterProvider()
}
// WithResourceOptions configures additional options for the default resource. The default
// resource will continue to include its default options.
func WithResourceOptions(opts ...resource.Option) CLIOption {
return func(cli *DockerCli) error {
cli.res.AppendOptions(opts...)
return nil
}
}
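An illustrative sketch (not part of the vendored file) of WithResourceOptions; it assumes command.NewDockerCli accepts CLIOption values and that semconv v1.21.0 provides ServiceVersion, and the version string is a placeholder.

package main

import (
	"github.com/docker/cli/cli/command"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func main() {
	// Adds an extra attribute to the default OTEL resource while keeping the
	// default options (service name, unique instance id, env detection).
	cli, err := command.NewDockerCli(
		command.WithResourceOptions(
			resource.WithAttributes(semconv.ServiceVersion("0.0.0-example")),
		),
	)
	if err != nil {
		panic(err)
	}
	_ = cli
}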
// WithResource overwrites the default resource and prevents its creation.
func WithResource(res *resource.Resource) CLIOption {
return func(cli *DockerCli) error {
cli.res.Set(res)
return nil
}
}
type telemetryResource struct {
res *resource.Resource
opts []resource.Option
once sync.Once
}
func (r *telemetryResource) Set(res *resource.Resource) {
r.res = res
}
func (r *telemetryResource) Get() *resource.Resource {
r.once.Do(r.init)
return r.res
}
func (r *telemetryResource) init() {
if r.res != nil {
r.opts = nil
return
}
opts := append(defaultResourceOptions(), r.opts...)
res, err := resource.New(context.Background(), opts...)
if err != nil {
otel.Handle(err)
}
r.res = res
// Clear the resource options since they'll never be used again and to allow
// the garbage collector to retrieve that memory.
r.opts = nil
}
// createGlobalMeterProvider creates a new MeterProvider from the initialized DockerCli struct
// with the given options and sets it as the global meter provider
func (cli *DockerCli) createGlobalMeterProvider(ctx context.Context, opts ...sdkmetric.Option) {
allOpts := make([]sdkmetric.Option, 0, len(opts)+2)
allOpts = append(allOpts, sdkmetric.WithResource(cli.Resource()))
allOpts = append(allOpts, dockerMetricExporter(ctx, cli)...)
allOpts = append(allOpts, opts...)
mp := sdkmetric.NewMeterProvider(allOpts...)
otel.SetMeterProvider(mp)
}
// createGlobalTracerProvider creates a new TracerProvider from the initialized DockerCli struct
// with the given options and sets it as the global tracer provider
func (cli *DockerCli) createGlobalTracerProvider(ctx context.Context, opts ...sdktrace.TracerProviderOption) {
allOpts := make([]sdktrace.TracerProviderOption, 0, len(opts)+2)
allOpts = append(allOpts, sdktrace.WithResource(cli.Resource()))
allOpts = append(allOpts, dockerSpanExporter(ctx, cli)...)
allOpts = append(allOpts, opts...)
tp := sdktrace.NewTracerProvider(allOpts...)
otel.SetTracerProvider(tp)
}
func defaultResourceOptions() []resource.Option {
return []resource.Option{
resource.WithDetectors(serviceNameDetector{}),
resource.WithAttributes(
// Use a unique instance id so OTEL knows that each invocation
// of the CLI is its own instance. Without this, downstream
// OTEL processors may think the same process is restarting
// continuously.
semconv.ServiceInstanceID(uuid.Generate().String()),
),
resource.WithFromEnv(),
resource.WithTelemetrySDK(),
}
}
func (r *telemetryResource) AppendOptions(opts ...resource.Option) {
if r.res != nil {
return
}
r.opts = append(r.opts, opts...)
}
type serviceNameDetector struct{}
func (serviceNameDetector) Detect(ctx context.Context) (*resource.Resource, error) {
return resource.StringDetector(
semconv.SchemaURL,
semconv.ServiceNameKey,
func() (string, error) {
return filepath.Base(os.Args[0]), nil
},
).Detect(ctx)
}
// cliReader is an implementation of Reader that will automatically
// report to a designated Exporter when Shutdown is called.
type cliReader struct {
sdkmetric.Reader
exporter sdkmetric.Exporter
}
func newCLIReader(exp sdkmetric.Exporter) sdkmetric.Reader {
reader := sdkmetric.NewManualReader(
sdkmetric.WithTemporalitySelector(deltaTemporality),
)
return &cliReader{
Reader: reader,
exporter: exp,
}
}
func (r *cliReader) Shutdown(ctx context.Context) error {
// Place a pretty tight constraint on the actual reporting.
// We don't want CLI metrics to prevent the CLI from exiting
// so if there's some kind of issue we need to abort pretty
// quickly.
ctx, cancel := context.WithTimeout(ctx, exportTimeout)
defer cancel()
return r.ForceFlush(ctx)
}
func (r *cliReader) ForceFlush(ctx context.Context) error {
var rm metricdata.ResourceMetrics
if err := r.Reader.Collect(ctx, &rm); err != nil {
return err
}
return r.exporter.Export(ctx, &rm)
}
// deltaTemporality sets the Temporality of every instrument to delta.
//
// This isn't really needed since we create a unique resource on each invocation,
// but it can help with cardinality concerns for downstream processors since they can
// perform aggregation for a time interval and then discard the data once that time
// period has passed. Cumulative temporality would imply to the downstream processor
// that it might receive a successive point, so it may unnecessarily keep state
// it really doesn't need.
func deltaTemporality(_ sdkmetric.InstrumentKind) metricdata.Temporality {
return metricdata.DeltaTemporality
}


@ -0,0 +1,137 @@
// FIXME(jsternberg): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package command
import (
"context"
"net/url"
"os"
"path"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
const (
otelContextFieldName string = "otel"
otelExporterOTLPEndpoint string = "OTEL_EXPORTER_OTLP_ENDPOINT"
debugEnvVarPrefix string = "DOCKER_CLI_"
)
// dockerExporterOTLPEndpoint retrieves the OTLP endpoint used for the docker reporter
// from the current context.
func dockerExporterOTLPEndpoint(cli Cli) (endpoint string, secure bool) {
meta, err := cli.ContextStore().GetMetadata(cli.CurrentContext())
if err != nil {
otel.Handle(err)
return "", false
}
var otelCfg any
switch m := meta.Metadata.(type) {
case DockerContext:
otelCfg = m.AdditionalFields[otelContextFieldName]
case map[string]any:
otelCfg = m[otelContextFieldName]
}
if otelCfg != nil {
otelMap, ok := otelCfg.(map[string]any)
if !ok {
otel.Handle(errors.Errorf(
"unexpected type for field %q: %T (expected: %T)",
otelContextFieldName,
otelCfg,
otelMap,
))
}
// keys from https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
endpoint, _ = otelMap[otelExporterOTLPEndpoint].(string)
}
// Override with env var value if it exists AND IS SET
// (ignore otel defaults for this override when the key exists but is empty)
if override := os.Getenv(debugEnvVarPrefix + otelExporterOTLPEndpoint); override != "" {
endpoint = override
}
if endpoint == "" {
return "", false
}
// Parse the endpoint. The docker config expects the endpoint to be
// in the form of a URL to match the environment variable, but this
// option doesn't correspond directly to WithEndpoint.
//
// We pretend we're the same as the environment reader.
u, err := url.Parse(endpoint)
if err != nil {
otel.Handle(errors.Errorf("docker otel endpoint is invalid: %s", err))
return "", false
}
switch u.Scheme {
case "unix":
// Unix sockets are a bit weird. OTEL seems to imply they
// can be used as an environment variable and are handled properly,
// but they don't seem to be: the environment-variable handling strips
// the scheme from the endpoint, while the underlying implementation
// needs the scheme to use the correct resolver.
//
// We'll just handle this in a special way and add the unix:// back to the endpoint.
endpoint = "unix://" + path.Join(u.Host, u.Path)
case "https":
secure = true
fallthrough
case "http":
endpoint = path.Join(u.Host, u.Path)
}
return endpoint, secure
}
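A stdlib-only sketch (not part of the vendored file) of the endpoint normalization described above; the endpoints are made-up examples.

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// A unix:// endpoint keeps its scheme so the gRPC resolver can pick the
	// right dialer, while http(s) endpoints are reduced to host/path.
	for _, endpoint := range []string{
		"unix:///run/user/1000/otel.sock",
		"https://otel.example.com:4317/v1",
	} {
		u, err := url.Parse(endpoint)
		if err != nil {
			panic(err)
		}
		switch u.Scheme {
		case "unix":
			fmt.Println("unix://" + path.Join(u.Host, u.Path))
		case "http", "https":
			fmt.Println(path.Join(u.Host, u.Path))
		}
	}
}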
func dockerSpanExporter(ctx context.Context, cli Cli) []sdktrace.TracerProviderOption {
endpoint, secure := dockerExporterOTLPEndpoint(cli)
if endpoint == "" {
return nil
}
opts := []otlptracegrpc.Option{
otlptracegrpc.WithEndpoint(endpoint),
}
if !secure {
opts = append(opts, otlptracegrpc.WithInsecure())
}
exp, err := otlptracegrpc.New(ctx, opts...)
if err != nil {
otel.Handle(err)
return nil
}
return []sdktrace.TracerProviderOption{sdktrace.WithBatcher(exp, sdktrace.WithExportTimeout(exportTimeout))}
}
func dockerMetricExporter(ctx context.Context, cli Cli) []sdkmetric.Option {
endpoint, secure := dockerExporterOTLPEndpoint(cli)
if endpoint == "" {
return nil
}
opts := []otlpmetricgrpc.Option{
otlpmetricgrpc.WithEndpoint(endpoint),
}
if !secure {
opts = append(opts, otlpmetricgrpc.WithInsecure())
}
exp, err := otlpmetricgrpc.New(ctx, opts...)
if err != nil {
otel.Handle(err)
return nil
}
return []sdkmetric.Option{sdkmetric.WithReader(newCLIReader(exp))}
}


@ -0,0 +1,25 @@
package command
// WithEnableGlobalMeterProvider configures the DockerCli to create a new
// MeterProvider from the initialized DockerCli struct, and set it as
// the global meter provider.
//
// WARNING: For internal use, don't depend on this.
func WithEnableGlobalMeterProvider() CLIOption {
return func(cli *DockerCli) error {
cli.enableGlobalMeter = true
return nil
}
}
// WithEnableGlobalTracerProvider configures the DockerCli to create a new
// TracerProvider from the initialized DockerCli struct, and set it as
// the global tracer provider.
//
// WARNING: For internal use, don't depend on this.
func WithEnableGlobalTracerProvider() CLIOption {
return func(cli *DockerCli) error {
cli.enableGlobalTracer = true
return nil
}
}


@ -0,0 +1,179 @@
package command
import (
"context"
"fmt"
"strings"
"time"
"github.com/docker/cli/cli/version"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
)
// BaseCommandAttributes returns an attribute.Set containing attributes to attach to metrics/traces
func BaseCommandAttributes(cmd *cobra.Command, streams Streams) []attribute.KeyValue {
return append([]attribute.KeyValue{
attribute.String("command.name", getCommandName(cmd)),
}, stdioAttributes(streams)...)
}
// InstrumentCobraCommands wraps all cobra commands' RunE funcs to set a command duration metric using otel.
//
// Note: this should be the last func to wrap/modify the PersistentRunE/RunE funcs before command execution.
//
// can also be used for spans!
func (cli *DockerCli) InstrumentCobraCommands(ctx context.Context, cmd *cobra.Command) {
// If PersistentPreRunE is nil, make it execute PersistentPreRun and return nil by default
ogPersistentPreRunE := cmd.PersistentPreRunE
if ogPersistentPreRunE == nil {
ogPersistentPreRun := cmd.PersistentPreRun
//nolint:unparam // necessary because error will always be nil here
ogPersistentPreRunE = func(cmd *cobra.Command, args []string) error {
ogPersistentPreRun(cmd, args)
return nil
}
cmd.PersistentPreRun = nil
}
// wrap RunE in PersistentPreRunE so that this operation gets executed on all children commands
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
// If RunE is nil, make it execute Run and return nil by default
ogRunE := cmd.RunE
if ogRunE == nil {
ogRun := cmd.Run
//nolint:unparam // necessary because error will always be nil here
ogRunE = func(cmd *cobra.Command, args []string) error {
ogRun(cmd, args)
return nil
}
cmd.Run = nil
}
cmd.RunE = func(cmd *cobra.Command, args []string) error {
// start the timer as the first step of every cobra command
stopInstrumentation := cli.StartInstrumentation(cmd)
cmdErr := ogRunE(cmd, args)
stopInstrumentation(cmdErr)
return cmdErr
}
return ogPersistentPreRunE(cmd, args)
}
}
// StartInstrumentation instruments CLI commands with the individual metrics and spans configured.
// It's the main command OTel utility, and new command-related metrics should be added to it.
// It should be called immediately before command execution, and returns a stopInstrumentation function
// that must be called with the error resulting from the command execution.
func (cli *DockerCli) StartInstrumentation(cmd *cobra.Command) (stopInstrumentation func(error)) {
baseAttrs := BaseCommandAttributes(cmd, cli)
return startCobraCommandTimer(cli.MeterProvider(), baseAttrs)
}
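// Illustrative usage sketch (example only, not upstream code): the stop
// function returned by StartInstrumentation must be invoked with the error
// produced by the command, mirroring what the RunE wrapper above does.
// The run parameter stands in for the real command work.
func exampleStartInstrumentation(cli *DockerCli, cmd *cobra.Command, run func() error) error {
	stop := cli.StartInstrumentation(cmd)
	err := run()
	stop(err) // records command.time with status/error attributes
	return err
}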
func startCobraCommandTimer(mp metric.MeterProvider, attrs []attribute.KeyValue) func(err error) {
meter := getDefaultMeter(mp)
durationCounter, _ := meter.Float64Counter(
"command.time",
metric.WithDescription("Measures the duration of the cobra command"),
metric.WithUnit("ms"),
)
start := time.Now()
return func(err error) {
// Use a new context for the export so that the command being cancelled
// doesn't affect the metrics, and we get metrics for cancelled commands.
ctx, cancel := context.WithTimeout(context.Background(), exportTimeout)
defer cancel()
duration := float64(time.Since(start)) / float64(time.Millisecond)
cmdStatusAttrs := attributesFromError(err)
durationCounter.Add(ctx, duration,
metric.WithAttributes(attrs...),
metric.WithAttributes(cmdStatusAttrs...),
)
if mp, ok := mp.(MeterProvider); ok {
mp.ForceFlush(ctx)
}
}
}
func stdioAttributes(streams Streams) []attribute.KeyValue {
return []attribute.KeyValue{
attribute.Bool("command.stdin.isatty", streams.In().IsTerminal()),
attribute.Bool("command.stdout.isatty", streams.Out().IsTerminal()),
attribute.Bool("command.stderr.isatty", streams.Err().IsTerminal()),
}
}
func attributesFromError(err error) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
exitCode := 0
if err != nil {
exitCode = 1
if stderr, ok := err.(statusError); ok {
// StatusError should only be used for errors, and all errors should
// have a non-zero exit status, so only set this here if this value isn't 0
if stderr.StatusCode != 0 {
exitCode = stderr.StatusCode
}
}
attrs = append(attrs, attribute.String("command.error.type", otelErrorType(err)))
}
attrs = append(attrs, attribute.Int("command.status.code", exitCode))
return attrs
}
// otelErrorType returns the value for the command.error.type attribute based on the error category.
func otelErrorType(err error) string {
name := "generic"
if errors.Is(err, context.Canceled) {
name = "canceled"
}
return name
}
// statusError reports an unsuccessful exit by a command.
type statusError struct {
Status string
StatusCode int
}
func (e statusError) Error() string {
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
// getCommandName gets the cobra command name in the format
// `... parentCommandName commandName` by traversing its parent commands recursively
// until the root command is reached.
//
// Note: the root command's name is excluded; if cmd is the root command, it returns "".
func getCommandName(cmd *cobra.Command) string {
fullCmdName := getFullCommandName(cmd)
i := strings.Index(fullCmdName, " ")
if i == -1 {
return ""
}
return fullCmdName[i+1:]
}
// getFullCommandName gets the full cobra command name in the format
// `... parentCommandName commandName` by traversing its parent commands recursively
// until the root command is reached.
func getFullCommandName(cmd *cobra.Command) string {
if cmd.HasParent() {
return fmt.Sprintf("%s %s", getFullCommandName(cmd.Parent()), cmd.Name())
}
return cmd.Name()
}
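// Worked example (example only, not upstream code): for a command tree
// "docker" -> "image" -> "ls", getFullCommandName returns "docker image ls"
// while getCommandName returns "image ls"; for the root command itself,
// getCommandName returns "".
func exampleCommandNames() (full, name string) {
	root := &cobra.Command{Use: "docker"}
	img := &cobra.Command{Use: "image"}
	ls := &cobra.Command{Use: "ls"}
	root.AddCommand(img)
	img.AddCommand(ls)
	return getFullCommandName(ls), getCommandName(ls) // "docker image ls", "image ls"
}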
// getDefaultMeter gets the default metric.Meter for the application
// using the given metric.MeterProvider
func getDefaultMeter(mp metric.MeterProvider) metric.Meter {
return mp.Meter(
"github.com/docker/cli",
metric.WithInstrumentationVersion(version.Version),
)
}

15
vendor/github.com/docker/cli/cli/command/trust.go generated vendored Normal file

@ -0,0 +1,15 @@
package command
import (
"github.com/spf13/pflag"
)
// AddTrustVerificationFlags adds content trust flags to the provided flagset
func AddTrustVerificationFlags(fs *pflag.FlagSet, v *bool, trusted bool) {
fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image verification")
}
// AddTrustSigningFlags adds "signing" flags to the provided flagset
func AddTrustSigningFlags(fs *pflag.FlagSet, v *bool, trusted bool) {
fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image signing")
}
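// Illustrative usage sketch (example only, not upstream code): registering the
// content-trust flag on a standalone flag set. The "trusted" argument usually
// reflects whether content trust is enabled in the environment.
func exampleTrustFlag(trusted bool) (*pflag.FlagSet, *bool) {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	verify := new(bool)
	AddTrustVerificationFlags(fs, verify, trusted)
	return fs, verify
}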

243
vendor/github.com/docker/cli/cli/command/utils.go generated vendored Normal file

@ -0,0 +1,243 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package command
import (
"bufio"
"context"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/api/types/filters"
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/moby/sys/sequential"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// CopyToFile writes the content of the reader to the specified file
func CopyToFile(outfile string, r io.Reader) error {
// We use sequential file access here to avoid depleting the standby list
// on Windows. On Linux, this is a call directly to os.CreateTemp
tmpFile, err := sequential.CreateTemp(filepath.Dir(outfile), ".docker_temp_")
if err != nil {
return err
}
tmpPath := tmpFile.Name()
_, err = io.Copy(tmpFile, r)
tmpFile.Close()
if err != nil {
os.Remove(tmpPath)
return err
}
if err = os.Rename(tmpPath, outfile); err != nil {
os.Remove(tmpPath)
return err
}
return nil
}
// capitalizeFirst capitalizes the first character of string
func capitalizeFirst(s string) string {
switch l := len(s); l {
case 0:
return s
case 1:
return strings.ToLower(s)
default:
return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
}
}
// PrettyPrint formats arbitrary data for human-readable output by uppercasing the first letter.
func PrettyPrint(i any) string {
switch t := i.(type) {
case nil:
return "None"
case string:
return capitalizeFirst(t)
default:
return capitalizeFirst(fmt.Sprintf("%s", t))
}
}
var ErrPromptTerminated = errdefs.Cancelled(errors.New("prompt terminated"))
// PromptForConfirmation requests and checks confirmation from the user.
// This will display the provided message followed by ' [y/N] '. If the user
// input 'y' or 'Y' it returns true otherwise false. If no message is provided,
// "Are you sure you want to proceed? [y/N] " will be used instead.
//
// If the user terminates the CLI with SIGINT or SIGTERM while the prompt is
// active, the prompt will return false with an ErrPromptTerminated error.
// When the prompt returns an error, the caller should propagate the error up
// the stack and close the io.Reader used for the prompt which will prevent the
// background goroutine from blocking indefinitely.
func PromptForConfirmation(ctx context.Context, ins io.Reader, outs io.Writer, message string) (bool, error) {
if message == "" {
message = "Are you sure you want to proceed?"
}
message += " [y/N] "
_, _ = fmt.Fprint(outs, message)
// On Windows, force the use of the regular OS stdin stream.
if runtime.GOOS == "windows" {
ins = streams.NewIn(os.Stdin)
}
result := make(chan bool)
go func() {
var res bool
scanner := bufio.NewScanner(ins)
if scanner.Scan() {
answer := strings.TrimSpace(scanner.Text())
if strings.EqualFold(answer, "y") {
res = true
}
}
result <- res
}()
select {
case <-ctx.Done():
_, _ = fmt.Fprintln(outs, "")
return false, ErrPromptTerminated
case r := <-result:
return r, nil
}
}
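// Illustrative usage sketch (example only, not upstream code): prompting
// before a destructive operation, honoring cancellation via the command
// context. The message below is illustrative.
func examplePrompt(ctx context.Context, dockerCli Cli) (bool, error) {
	ok, err := PromptForConfirmation(ctx, dockerCli.In(), dockerCli.Out(), "Remove all stopped containers?")
	if err != nil {
		// ErrPromptTerminated: the user sent SIGINT/SIGTERM while the prompt was active.
		return false, err
	}
	return ok, nil
}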
// PruneFilters returns consolidated prune filters obtained from config.json and cli
func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {
if dockerCli.ConfigFile() == nil {
return pruneFilters
}
for _, f := range dockerCli.ConfigFile().PruneFilters {
k, v, ok := strings.Cut(f, "=")
if !ok {
continue
}
if k == "label" {
// The CLI label filter supersedes config.json.
// If the CLI label filter conflicts with config.json,
// skip adding the label! filter from config.json.
if pruneFilters.Contains("label!") && pruneFilters.ExactMatch("label!", v) {
continue
}
} else if k == "label!" {
// The CLI label! filter supersedes config.json.
// If the CLI label! filter conflicts with config.json,
// skip adding the label filter from config.json.
if pruneFilters.Contains("label") && pruneFilters.ExactMatch("label", v) {
continue
}
}
pruneFilters.Add(k, v)
}
return pruneFilters
}
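// Worked example (example only, not upstream code): assuming config.json
// contains `"pruneFilters": ["label!=keep=true"]`, that entry is skipped here
// because it conflicts with the CLI-provided `label=keep=true` filter.
func examplePruneFilters(dockerCli Cli) filters.Args {
	cliFilters := filters.NewArgs(filters.Arg("label", "keep=true"))
	return PruneFilters(dockerCli, cliFilters)
}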
// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later.
func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
flags.SetAnnotation("platform", "version", []string{"1.32"})
}
// ValidateOutputPath validates the output paths of the `export` and `save` commands.
func ValidateOutputPath(path string) error {
dir := filepath.Dir(filepath.Clean(path))
if dir != "" && dir != "." {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return errors.Errorf("invalid output path: directory %q does not exist", dir)
}
}
// check whether `path` points to a regular file
// (if the path exists and doesn't point to a directory)
if fileInfo, err := os.Stat(path); !os.IsNotExist(err) {
if err != nil {
return err
}
if fileInfo.Mode().IsDir() || fileInfo.Mode().IsRegular() {
return nil
}
if err := ValidateOutputPathFileMode(fileInfo.Mode()); err != nil {
return errors.Wrapf(err, fmt.Sprintf("invalid output path: %q must be a directory or a regular file", path))
}
}
return nil
}
// ValidateOutputPathFileMode validates the file mode of output paths used by the `cp` command
// and serves as a helper to `ValidateOutputPath`
func ValidateOutputPathFileMode(fileMode os.FileMode) error {
switch {
case fileMode&os.ModeDevice != 0:
return errors.New("got a device")
case fileMode&os.ModeIrregular != 0:
return errors.New("got an irregular file")
}
return nil
}
func stringSliceIndex(s, subs []string) int {
j := 0
if len(subs) > 0 {
for i, x := range s {
if j < len(subs) && subs[j] == x {
j++
} else {
j = 0
}
if len(subs) == j {
return i + 1 - j
}
}
}
return -1
}
// StringSliceReplaceAt replaces the sub-slice find with the sub-slice replace in the string
// slice s, returning a new slice and a boolean indicating if the replacement happened.
// requireIndex is the index at which find needs to be located (or -1 to disregard that).
func StringSliceReplaceAt(s, find, replace []string, requireIndex int) ([]string, bool) {
idx := stringSliceIndex(s, find)
if (requireIndex != -1 && requireIndex != idx) || idx == -1 {
return s, false
}
out := append([]string{}, s[:idx]...)
out = append(out, replace...)
out = append(out, s[idx+len(find):]...)
return out, true
}
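// Worked example (example only, not upstream code): replacing the sub-slice
// ["stack", "deploy"] with ["stack", "up"] when it must start at index 0.
func exampleStringSliceReplaceAt() ([]string, bool) {
	args := []string{"stack", "deploy", "--compose-file", "compose.yml"}
	return StringSliceReplaceAt(args, []string{"stack", "deploy"}, []string{"stack", "up"}, 0)
	// -> ["stack", "up", "--compose-file", "compose.yml"], true
}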
// ValidateMountWithAPIVersion validates a mount with the server API version.
func ValidateMountWithAPIVersion(m mounttypes.Mount, serverAPIVersion string) error {
if m.BindOptions != nil {
if m.BindOptions.NonRecursive && versions.LessThan(serverAPIVersion, "1.40") {
return errors.Errorf("bind-recursive=disabled requires API v1.40 or later")
}
// ReadOnlyNonRecursive can be safely ignored when API < 1.44
if m.BindOptions.ReadOnlyForceRecursive && versions.LessThan(serverAPIVersion, "1.44") {
return errors.Errorf("bind-recursive=readonly requires API v1.44 or later")
}
}
return nil
}


@ -0,0 +1,164 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package interpolation
import (
"os"
"strings"
"github.com/docker/cli/cli/compose/template"
"github.com/pkg/errors"
)
// Options supported by Interpolate
type Options struct {
// LookupValue from a key
LookupValue LookupValue
// TypeCastMapping maps key paths to functions to cast to a type
TypeCastMapping map[Path]Cast
// Substitution function to use
Substitute func(string, template.Mapping) (string, error)
}
// LookupValue is a function which maps from variable names to values.
// Returns the value as a string and a bool indicating whether
// the value is present, to distinguish between an empty string
// and the absence of a value.
type LookupValue func(key string) (string, bool)
// Cast a value to a new type, or return an error if the value can't be cast
type Cast func(value string) (any, error)
// Interpolate replaces variables in a string with the values from a mapping
func Interpolate(config map[string]any, opts Options) (map[string]any, error) {
if opts.LookupValue == nil {
opts.LookupValue = os.LookupEnv
}
if opts.TypeCastMapping == nil {
opts.TypeCastMapping = make(map[Path]Cast)
}
if opts.Substitute == nil {
opts.Substitute = template.Substitute
}
out := map[string]any{}
for key, value := range config {
interpolatedValue, err := recursiveInterpolate(value, NewPath(key), opts)
if err != nil {
return out, err
}
out[key] = interpolatedValue
}
return out, nil
}
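// Illustrative usage sketch (example only, not upstream code): interpolating a
// minimal config with an in-memory lookup instead of os.LookupEnv.
func exampleInterpolate() (map[string]any, error) {
	env := map[string]string{"TAG": "3.19"}
	return Interpolate(
		map[string]any{"image": "alpine:${TAG}"},
		Options{LookupValue: func(key string) (string, bool) {
			v, ok := env[key]
			return v, ok
		}},
	)
	// -> map[image:alpine:3.19]
}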
func recursiveInterpolate(value any, path Path, opts Options) (any, error) {
switch value := value.(type) {
case string:
newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue))
if err != nil || newValue == value {
return value, newPathError(path, err)
}
caster, ok := opts.getCasterForPath(path)
if !ok {
return newValue, nil
}
casted, err := caster(newValue)
return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type"))
case map[string]any:
out := map[string]any{}
for key, elem := range value {
interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts)
if err != nil {
return nil, err
}
out[key] = interpolatedElem
}
return out, nil
case []any:
out := make([]any, len(value))
for i, elem := range value {
interpolatedElem, err := recursiveInterpolate(elem, path.Next(PathMatchList), opts)
if err != nil {
return nil, err
}
out[i] = interpolatedElem
}
return out, nil
default:
return value, nil
}
}
func newPathError(path Path, err error) error {
switch err := err.(type) {
case nil:
return nil
case *template.InvalidTemplateError:
return errors.Errorf(
"invalid interpolation format for %s: %#v; you may need to escape any $ with another $",
path, err.Template)
default:
return errors.Wrapf(err, "error while interpolating %s", path)
}
}
const pathSeparator = "."
// PathMatchAll is a token used as part of a Path to match any key at that level
// in the nested structure
const PathMatchAll = "*"
// PathMatchList is a token used as part of a Path to match items in a list
const PathMatchList = "[]"
// Path is a dotted path of keys to a value in a nested mapping structure. A *
// section in a path will match any key in the mapping structure.
type Path string
// NewPath returns a new Path
func NewPath(items ...string) Path {
return Path(strings.Join(items, pathSeparator))
}
// Next returns a new path by appending part to the current path
func (p Path) Next(part string) Path {
return Path(string(p) + pathSeparator + part)
}
func (p Path) parts() []string {
return strings.Split(string(p), pathSeparator)
}
func (p Path) matches(pattern Path) bool {
patternParts := pattern.parts()
parts := p.parts()
if len(patternParts) != len(parts) {
return false
}
for index, part := range parts {
switch patternParts[index] {
case PathMatchAll, part:
continue
default:
return false
}
}
return true
}
func (o Options) getCasterForPath(path Path) (Cast, bool) {
for pattern, caster := range o.TypeCastMapping {
if path.matches(pattern) {
return caster, true
}
}
return nil, false
}
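// Illustrative sketch (example only, not upstream code): a TypeCastMapping
// entry keyed by a wildcard path. Any interpolated value whose path matches
// "services.*.privileged" is passed through the cast below.
func exampleTypeCast() Options {
	return Options{
		TypeCastMapping: map[Path]Cast{
			NewPath("services", PathMatchAll, "privileged"): func(value string) (any, error) {
				return value == "true", nil
			},
		},
	}
}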


@ -0,0 +1,8 @@
# passed through
FOO=foo_from_env_file
# overridden in example2.env
BAR=bar_from_env_file
# overridden in full-example.yml
BAZ=baz_from_env_file


@ -0,0 +1,4 @@
BAR=bar_from_env_file_2
# overridden in configDetails.Environment
QUX=quz_from_env_file_2


@ -0,0 +1,452 @@
version: "3.13"
services:
foo:
build:
context: ./dir
dockerfile: Dockerfile
args:
foo: bar
target: foo
network: foo
cache_from:
- foo
- bar
extra_hosts:
- "ipv4.example.com:127.0.0.1"
- "ipv6.example.com:::1"
labels: [FOO=BAR]
cap_add:
- ALL
cap_drop:
- NET_ADMIN
- SYS_ADMIN
cgroup_parent: m-executor-abcd
# String or list
command: bundle exec thin -p 3000
# command: ["bundle", "exec", "thin", "-p", "3000"]
configs:
- config1
- source: config2
target: /my_config
uid: '103'
gid: '103'
mode: 0440
container_name: my-web-container
depends_on:
- db
- redis
deploy:
mode: replicated
replicas: 6
labels: [FOO=BAR]
rollback_config:
parallelism: 3
delay: 10s
failure_action: continue
monitor: 60s
max_failure_ratio: 0.3
order: start-first
update_config:
parallelism: 3
delay: 10s
failure_action: continue
monitor: 60s
max_failure_ratio: 0.3
order: start-first
resources:
limits:
cpus: '0.001'
memory: 50M
pids: 100
reservations:
cpus: '0.0001'
memory: 20M
generic_resources:
- discrete_resource_spec:
kind: 'gpu'
value: 2
- discrete_resource_spec:
kind: 'ssd'
value: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
placement:
constraints: [node=foo]
max_replicas_per_node: 5
preferences:
- spread: node.labels.az
endpoint_mode: dnsrr
devices:
- "/dev/ttyUSB0:/dev/ttyUSB0"
# String or list
# dns: 8.8.8.8
dns:
- 8.8.8.8
- 9.9.9.9
# String or list
# dns_search: example.com
dns_search:
- dc1.example.com
- dc2.example.com
domainname: foo.com
# String or list
# entrypoint: /code/entrypoint.sh -p 3000
entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
# String or list
# env_file: .env
env_file:
- ./example1.env
- ./example2.env
# Mapping or list
# Mapping values can be strings, numbers or null
# Booleans are not allowed - must be quoted
environment:
BAZ: baz_from_service_def
QUX:
# environment:
# - RACK_ENV=development
# - SHOW=true
# - SESSION_SECRET
# Items can be strings or numbers
expose:
- "3000"
- 8000
external_links:
- redis_1
- project_db_1:mysql
- project_db_1:postgresql
# Mapping or list
# Mapping values must be strings
# extra_hosts:
# somehost: "162.242.195.82"
# otherhost: "50.31.209.229"
# host.docker.internal: "host-gateway"
extra_hosts:
- "somehost:162.242.195.82"
- "otherhost:50.31.209.229"
- "host.docker.internal:host-gateway"
hostname: foo
healthcheck:
test: echo "hello world"
interval: 10s
timeout: 1s
retries: 5
start_period: 15s
start_interval: 1s
# Any valid image reference - repo, tag, id, sha
image: redis
# image: ubuntu:14.04
# image: tutum/influxdb
# image: example-registry.com:4000/postgresql
# image: a4bc65fd
# image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
ipc: host
# Mapping or list
# Mapping values can be strings, numbers or null
labels:
com.example.description: "Accounting webapp"
com.example.number: 42
com.example.empty-label:
# labels:
# - "com.example.description=Accounting webapp"
# - "com.example.number=42"
# - "com.example.empty-label"
links:
- db
- db:database
- redis
logging:
driver: syslog
options:
syslog-address: "tcp://192.168.0.42:123"
mac_address: 02:42:ac:11:65:43
# network_mode: "bridge"
# network_mode: "host"
# network_mode: "none"
# Use the network mode of an arbitrary container from another service
# network_mode: "service:db"
# Use the network mode of another container, specified by name or id
# network_mode: "container:some-container"
network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
networks:
some-network:
aliases:
- alias1
- alias3
driver_opts:
"driveropt1": "optval1"
"driveropt2": "optval2"
other-network:
ipv4_address: 172.16.238.10
ipv6_address: 2001:3984:3989::10
other-other-network:
pid: "host"
ports:
- 3000
- "3001-3005"
- "8000:8000"
- "9090-9091:8080-8081"
- "49100:22"
- "127.0.0.1:8001:8001"
- "127.0.0.1:5000-5010:5000-5010"
privileged: true
read_only: true
restart: always
secrets:
- secret1
- source: secret2
target: my_secret
uid: '103'
gid: '103'
mode: 0440
security_opt:
- label=level:s0:c100,c200
- label=type:svirt_apache_t
stdin_open: true
stop_grace_period: 20s
stop_signal: SIGUSR1
sysctls:
net.core.somaxconn: 1024
net.ipv4.tcp_syncookies: 0
# String or list
# tmpfs: /run
tmpfs:
- /run
- /tmp
tty: true
ulimits:
# Single number or mapping with soft + hard limits
nproc: 65535
nofile:
soft: 20000
hard: 40000
user: someone
volumes:
# Just specify a path and let the Engine create a volume
- /var/lib/mysql
# Specify an absolute path mapping
- /opt/data:/var/lib/mysql
# Path on the host, relative to the Compose file
- .:/code
- ./static:/var/www/html
# User-relative path
- ~/configs:/etc/configs/:ro
# Named volume
- datavolume:/var/lib/mysql
- type: bind
source: ./opt
target: /opt
consistency: cached
- type: tmpfs
target: /opt
tmpfs:
size: 10000
- type: cluster
source: group:mygroup
target: /srv
working_dir: /code
x-bar: baz
x-foo: bar
networks:
# Entries can be null, which specifies simply that a network
# called "{project name}_some-network" should be created and
# use the default driver
some-network:
other-network:
driver: overlay
driver_opts:
# Values can be strings or numbers
foo: "bar"
baz: 1
ipam:
driver: overlay
# driver_opts:
# # Values can be strings or numbers
# com.docker.network.enable_ipv6: "true"
# com.docker.network.numeric_value: 1
config:
- subnet: 172.16.238.0/24
# gateway: 172.16.238.1
- subnet: 2001:3984:3989::/64
# gateway: 2001:3984:3989::1
labels:
foo: bar
external-network:
# Specifies that a pre-existing network called "external-network"
# can be referred to within this file as "external-network"
external: true
other-external-network:
# Specifies that a pre-existing network called "my-cool-network"
# can be referred to within this file as "other-external-network"
external:
name: my-cool-network
x-bar: baz
x-foo: bar
volumes:
# Entries can be null, which specifies simply that a volume
# called "{project name}_some-volume" should be created and
# use the default driver
some-volume:
other-volume:
driver: flocker
driver_opts:
# Values can be strings or numbers
foo: "bar"
baz: 1
labels:
foo: bar
another-volume:
name: "user_specified_name"
driver: vsphere
driver_opts:
# Values can be strings or numbers
foo: "bar"
baz: 1
external-volume:
# Specifies that a pre-existing volume called "external-volume"
# can be referred to within this file as "external-volume"
external: true
other-external-volume:
# Specifies that a pre-existing volume called "my-cool-volume"
# can be referred to within this file as "other-external-volume"
# This example uses the deprecated "volume.external.name" (replaced by "volume.name")
external:
name: my-cool-volume
external-volume3:
# Specifies that a pre-existing volume called "this-is-volume3"
# can be referred to within this file as "external-volume3"
name: this-is-volume3
external: true
x-bar: baz
x-foo: bar
cluster-volume:
driver: my-csi-driver
x-cluster-spec:
group: mygroup
access_mode:
scope: single
sharing: none
block_volume: {}
accessibility_requirements:
requisite:
- segments:
- region=R1
- zone=Z1
- segments:
region: R1
zone: Z2
preferred:
- segments:
region: R1
zone: Z1
capacity_range:
required_bytes: 1G
limit_bytes: 8G
secrets:
- key: mycsisecret
secret: secret1
- key: mycsisecret2
secret: secret4
availability: active
configs:
config1:
file: ./config_data
labels:
foo: bar
config2:
external:
name: my_config
config3:
external: true
config4:
name: foo
x-bar: baz
x-foo: bar
secrets:
secret1:
file: ./secret_data
labels:
foo: bar
secret2:
external:
name: my_secret
secret3:
external: true
secret4:
name: bar
x-bar: baz
x-foo: bar
x-bar: baz
x-foo: bar
x-nested:
bar: baz
foo: bar


@ -0,0 +1,75 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package loader
import (
"strconv"
"strings"
interp "github.com/docker/cli/cli/compose/interpolation"
"github.com/pkg/errors"
)
var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
servicePath("configs", interp.PathMatchList, "mode"): toInt,
servicePath("secrets", interp.PathMatchList, "mode"): toInt,
servicePath("healthcheck", "retries"): toInt,
servicePath("healthcheck", "disable"): toBoolean,
servicePath("deploy", "replicas"): toInt,
servicePath("deploy", "update_config", "parallelism"): toInt,
servicePath("deploy", "update_config", "max_failure_ratio"): toFloat,
servicePath("deploy", "rollback_config", "parallelism"): toInt,
servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat,
servicePath("deploy", "restart_policy", "max_attempts"): toInt,
servicePath("deploy", "placement", "max_replicas_per_node"): toInt,
servicePath("ports", interp.PathMatchList, "target"): toInt,
servicePath("ports", interp.PathMatchList, "published"): toInt,
servicePath("ulimits", interp.PathMatchAll): toInt,
servicePath("ulimits", interp.PathMatchAll, "hard"): toInt,
servicePath("ulimits", interp.PathMatchAll, "soft"): toInt,
servicePath("privileged"): toBoolean,
servicePath("read_only"): toBoolean,
servicePath("stdin_open"): toBoolean,
servicePath("tty"): toBoolean,
servicePath("volumes", interp.PathMatchList, "read_only"): toBoolean,
servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean,
iPath("networks", interp.PathMatchAll, "external"): toBoolean,
iPath("networks", interp.PathMatchAll, "internal"): toBoolean,
iPath("networks", interp.PathMatchAll, "attachable"): toBoolean,
iPath("volumes", interp.PathMatchAll, "external"): toBoolean,
iPath("secrets", interp.PathMatchAll, "external"): toBoolean,
iPath("configs", interp.PathMatchAll, "external"): toBoolean,
}
func iPath(parts ...string) interp.Path {
return interp.NewPath(parts...)
}
func servicePath(parts ...string) interp.Path {
return iPath(append([]string{"services", interp.PathMatchAll}, parts...)...)
}
func toInt(value string) (any, error) {
return strconv.Atoi(value)
}
func toFloat(value string) (any, error) {
return strconv.ParseFloat(value, 64)
}
// should match http://yaml.org/type/bool.html
func toBoolean(value string) (any, error) {
switch strings.ToLower(value) {
case "y", "yes", "true", "on":
return true, nil
case "n", "no", "false", "off":
return false, nil
default:
return nil, errors.Errorf("invalid boolean: %s", value)
}
}
func interpolateConfig(configDict map[string]any, opts interp.Options) (map[string]any, error) {
return interp.Interpolate(configDict, opts)
}
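// Illustrative note (example only, not upstream code): servicePath expands to a
// wildcard interpolation path, so the mappings above apply to every service.
func exampleServicePath() interp.Path {
	return servicePath("deploy", "replicas") // "services.*.deploy.replicas"
}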


@ -0,0 +1,988 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package loader
import (
"fmt"
"path"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"time"
interp "github.com/docker/cli/cli/compose/interpolation"
"github.com/docker/cli/cli/compose/schema"
"github.com/docker/cli/cli/compose/template"
"github.com/docker/cli/cli/compose/types"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types/versions"
"github.com/docker/go-connections/nat"
units "github.com/docker/go-units"
"github.com/go-viper/mapstructure/v2"
"github.com/google/shlex"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
yaml "gopkg.in/yaml.v2"
)
// Options supported by Load
type Options struct {
// Skip schema validation
SkipValidation bool
// Skip interpolation
SkipInterpolation bool
// Interpolation options
Interpolate *interp.Options
// Discard 'env_file' entries after resolving to 'environment' section
discardEnvFiles bool
}
// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to
// the `environment` section
func WithDiscardEnvFiles(options *Options) {
options.discardEnvFiles = true
}
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
// structure, and returns it.
func ParseYAML(source []byte) (map[string]any, error) {
var cfg any
if err := yaml.Unmarshal(source, &cfg); err != nil {
return nil, err
}
cfgMap, ok := cfg.(map[any]any)
if !ok {
return nil, errors.Errorf("top-level object must be a mapping")
}
converted, err := convertToStringKeysRecursive(cfgMap, "")
if err != nil {
return nil, err
}
return converted.(map[string]any), nil
}
// Load reads a ConfigDetails and returns a fully loaded configuration
func Load(configDetails types.ConfigDetails, opt ...func(*Options)) (*types.Config, error) {
if len(configDetails.ConfigFiles) < 1 {
return nil, errors.Errorf("No files specified")
}
options := &Options{
Interpolate: &interp.Options{
Substitute: template.Substitute,
LookupValue: configDetails.LookupEnv,
TypeCastMapping: interpolateTypeCastMapping,
},
}
for _, op := range opt {
op(options)
}
configs := []*types.Config{}
var err error
for _, file := range configDetails.ConfigFiles {
configDict := file.Config
version := schema.Version(configDict)
if configDetails.Version == "" {
configDetails.Version = version
}
if configDetails.Version != version {
return nil, errors.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version)
}
if err := validateForbidden(configDict); err != nil {
return nil, err
}
if !options.SkipInterpolation {
configDict, err = interpolateConfig(configDict, *options.Interpolate)
if err != nil {
return nil, err
}
}
if !options.SkipValidation {
if err := schema.Validate(configDict, configDetails.Version); err != nil {
return nil, err
}
}
cfg, err := loadSections(configDict, configDetails)
if err != nil {
return nil, err
}
cfg.Filename = file.Filename
if options.discardEnvFiles {
for i := range cfg.Services {
cfg.Services[i].EnvFile = nil
}
}
configs = append(configs, cfg)
}
return merge(configs)
}
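// Illustrative usage sketch (example only, not upstream code): parsing a single
// compose file and loading it. Filename and working directory are illustrative.
func exampleLoad(source []byte) (*types.Config, error) {
	parsed, err := ParseYAML(source)
	if err != nil {
		return nil, err
	}
	return Load(types.ConfigDetails{
		WorkingDir:  ".",
		ConfigFiles: []types.ConfigFile{{Filename: "docker-compose.yml", Config: parsed}},
		Environment: map[string]string{},
	})
}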
func validateForbidden(configDict map[string]any) error {
servicesDict, ok := configDict["services"].(map[string]any)
if !ok {
return nil
}
forbidden := getProperties(servicesDict, types.ForbiddenProperties)
if len(forbidden) > 0 {
return &ForbiddenPropertiesError{Properties: forbidden}
}
return nil
}
func loadSections(config map[string]any, configDetails types.ConfigDetails) (*types.Config, error) {
var err error
cfg := types.Config{
Version: schema.Version(config),
}
loaders := []struct {
key string
fnc func(config map[string]any) error
}{
{
key: "services",
fnc: func(config map[string]any) error {
cfg.Services, err = LoadServices(config, configDetails.WorkingDir, configDetails.LookupEnv)
return err
},
},
{
key: "networks",
fnc: func(config map[string]any) error {
cfg.Networks, err = LoadNetworks(config, configDetails.Version)
return err
},
},
{
key: "volumes",
fnc: func(config map[string]any) error {
cfg.Volumes, err = LoadVolumes(config, configDetails.Version)
return err
},
},
{
key: "secrets",
fnc: func(config map[string]any) error {
cfg.Secrets, err = LoadSecrets(config, configDetails)
return err
},
},
{
key: "configs",
fnc: func(config map[string]any) error {
cfg.Configs, err = LoadConfigObjs(config, configDetails)
return err
},
},
}
for _, loader := range loaders {
if err := loader.fnc(getSection(config, loader.key)); err != nil {
return nil, err
}
}
cfg.Extras = getExtras(config)
return &cfg, nil
}
func getSection(config map[string]any, key string) map[string]any {
section, ok := config[key]
if !ok {
return make(map[string]any)
}
return section.(map[string]any)
}
// GetUnsupportedProperties returns the list of any unsupported properties that are
// used in the Compose files.
func GetUnsupportedProperties(configDicts ...map[string]any) []string {
unsupported := map[string]bool{}
for _, configDict := range configDicts {
for _, service := range getServices(configDict) {
serviceDict := service.(map[string]any)
for _, property := range types.UnsupportedProperties {
if _, isSet := serviceDict[property]; isSet {
unsupported[property] = true
}
}
}
}
return sortedKeys(unsupported)
}
func sortedKeys(set map[string]bool) []string {
keys := make([]string, 0, len(set))
for key := range set {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
// GetDeprecatedProperties returns the deprecated properties used in the compose
// files, mapped to their deprecation messages.
func GetDeprecatedProperties(configDicts ...map[string]any) map[string]string {
deprecated := map[string]string{}
for _, configDict := range configDicts {
deprecatedProperties := getProperties(getServices(configDict), types.DeprecatedProperties)
for key, value := range deprecatedProperties {
deprecated[key] = value
}
}
return deprecated
}
func getProperties(services map[string]any, propertyMap map[string]string) map[string]string {
output := map[string]string{}
for _, service := range services {
if serviceDict, ok := service.(map[string]any); ok {
for property, description := range propertyMap {
if _, isSet := serviceDict[property]; isSet {
output[property] = description
}
}
}
}
return output
}
// ForbiddenPropertiesError is returned when there are properties in the Compose
// file that are forbidden.
type ForbiddenPropertiesError struct {
Properties map[string]string
}
func (e *ForbiddenPropertiesError) Error() string {
return "Configuration contains forbidden properties"
}
func getServices(configDict map[string]any) map[string]any {
if services, ok := configDict["services"]; ok {
if servicesDict, ok := services.(map[string]any); ok {
return servicesDict
}
}
return map[string]any{}
}
// Transform converts the source into the target struct with compose types transformer
// and the specified transformers if any.
func Transform(source any, target any, additionalTransformers ...Transformer) error {
data := mapstructure.Metadata{}
config := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.ComposeDecodeHookFunc(
createTransformHook(additionalTransformers...),
mapstructure.StringToTimeDurationHookFunc()),
Result: target,
Metadata: &data,
}
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(source)
}
// TransformerFunc defines a function to perform the actual transformation
type TransformerFunc func(any) (any, error)
// Transformer defines a map to type transformer
type Transformer struct {
TypeOf reflect.Type
Func TransformerFunc
}
func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType {
transforms := map[reflect.Type]func(any) (any, error){
reflect.TypeOf(types.External{}): transformExternal,
reflect.TypeOf(types.HealthCheckTest{}): transformHealthCheckTest,
reflect.TypeOf(types.ShellCommand{}): transformShellCommand,
reflect.TypeOf(types.StringList{}): transformStringList,
reflect.TypeOf(map[string]string{}): transformMapStringString,
reflect.TypeOf(types.UlimitsConfig{}): transformUlimits,
reflect.TypeOf(types.UnitBytes(0)): transformSize,
reflect.TypeOf([]types.ServicePortConfig{}): transformServicePort,
reflect.TypeOf(types.ServiceSecretConfig{}): transformStringSourceMap,
reflect.TypeOf(types.ServiceConfigObjConfig{}): transformStringSourceMap,
reflect.TypeOf(types.StringOrNumberList{}): transformStringOrNumberList,
reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap,
reflect.TypeOf(types.Mapping{}): transformMappingOrListFunc("=", false),
reflect.TypeOf(types.MappingWithEquals{}): transformMappingOrListFunc("=", true),
reflect.TypeOf(types.Labels{}): transformMappingOrListFunc("=", false),
reflect.TypeOf(types.MappingWithColon{}): transformMappingOrListFunc(":", false),
reflect.TypeOf(types.HostsList{}): transformHostsList,
reflect.TypeOf(types.ServiceVolumeConfig{}): transformServiceVolumeConfig,
reflect.TypeOf(types.BuildConfig{}): transformBuildConfig,
reflect.TypeOf(types.Duration(0)): transformStringToDuration,
}
for _, transformer := range additionalTransformers {
transforms[transformer.TypeOf] = transformer.Func
}
return func(_ reflect.Type, target reflect.Type, data any) (any, error) {
transform, ok := transforms[target]
if !ok {
return data, nil
}
return transform(data)
}
}
// keys need to be converted to strings for jsonschema
func convertToStringKeysRecursive(value any, keyPrefix string) (any, error) {
if mapping, ok := value.(map[any]any); ok {
dict := make(map[string]any)
for key, entry := range mapping {
str, ok := key.(string)
if !ok {
return nil, formatInvalidKeyError(keyPrefix, key)
}
var newKeyPrefix string
if keyPrefix == "" {
newKeyPrefix = str
} else {
newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
}
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
if err != nil {
return nil, err
}
dict[str] = convertedEntry
}
return dict, nil
}
if list, ok := value.([]any); ok {
var convertedList []any
for index, entry := range list {
newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
if err != nil {
return nil, err
}
convertedList = append(convertedList, convertedEntry)
}
return convertedList, nil
}
return value, nil
}
func formatInvalidKeyError(keyPrefix string, key any) error {
var location string
if keyPrefix == "" {
location = "at top level"
} else {
location = "in " + keyPrefix
}
return errors.Errorf("non-string key %s: %#v", location, key)
}
// LoadServices produces a slice of ServiceConfig from a compose file Dict.
// The servicesDict is not validated if directly used. Use Load() to enable validation
func LoadServices(servicesDict map[string]any, workingDir string, lookupEnv template.Mapping) ([]types.ServiceConfig, error) {
services := make([]types.ServiceConfig, 0, len(servicesDict))
for name, serviceDef := range servicesDict {
serviceConfig, err := LoadService(name, serviceDef.(map[string]any), workingDir, lookupEnv)
if err != nil {
return nil, err
}
services = append(services, *serviceConfig)
}
return services, nil
}
// LoadService produces a single ServiceConfig from a compose file Dict
// the serviceDict is not validated if directly used. Use Load() to enable validation
func LoadService(name string, serviceDict map[string]any, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) {
serviceConfig := &types.ServiceConfig{}
if err := Transform(serviceDict, serviceConfig); err != nil {
return nil, err
}
serviceConfig.Name = name
if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil {
return nil, err
}
if err := resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv); err != nil {
return nil, err
}
serviceConfig.Extras = getExtras(serviceDict)
return serviceConfig, nil
}
func loadExtras(name string, source map[string]any) map[string]any {
if dict, ok := source[name].(map[string]any); ok {
return getExtras(dict)
}
return nil
}
func getExtras(dict map[string]any) map[string]any {
extras := map[string]any{}
for key, value := range dict {
if strings.HasPrefix(key, "x-") {
extras[key] = value
}
}
if len(extras) == 0 {
return nil
}
return extras
}
func updateEnvironment(environment map[string]*string, vars map[string]*string, lookupEnv template.Mapping) {
for k, v := range vars {
interpolatedV, ok := lookupEnv(k)
if (v == nil || *v == "") && ok {
// lookupEnv is prioritized over vars
environment[k] = &interpolatedV
} else {
environment[k] = v
}
}
}
func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error {
environment := make(map[string]*string)
if len(serviceConfig.EnvFile) > 0 {
var envVars []string
for _, file := range serviceConfig.EnvFile {
filePath := absPath(workingDir, file)
fileVars, err := opts.ParseEnvFile(filePath)
if err != nil {
return err
}
envVars = append(envVars, fileVars...)
}
updateEnvironment(environment,
opts.ConvertKVStringsToMapWithNil(envVars), lookupEnv)
}
updateEnvironment(environment, serviceConfig.Environment, lookupEnv)
serviceConfig.Environment = environment
return nil
}
func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) error {
for i, volume := range volumes {
if volume.Type != "bind" {
continue
}
if volume.Source == "" {
return errors.New(`invalid mount config for type "bind": field Source must not be empty`)
}
filePath := expandUser(volume.Source, lookupEnv)
// Check if source is an absolute path (either Unix or Windows), to
// handle a Windows client with a Unix daemon or vice-versa.
//
// Note that this is not required for Docker for Windows when specifying
// a local Windows path, because Docker for Windows translates the Windows
// path into a valid path within the VM.
if !path.IsAbs(filePath) && !isAbs(filePath) {
filePath = absPath(workingDir, filePath)
}
volume.Source = filePath
volumes[i] = volume
}
return nil
}
// TODO: make this more robust
func expandUser(srcPath string, lookupEnv template.Mapping) string {
if strings.HasPrefix(srcPath, "~") {
home, ok := lookupEnv("HOME")
if !ok {
logrus.Warn("cannot expand '~', because the environment lacks HOME")
return srcPath
}
return strings.Replace(srcPath, "~", home, 1)
}
return srcPath
}
func transformUlimits(data any) (any, error) {
switch value := data.(type) {
case int:
return types.UlimitsConfig{Single: value}, nil
case map[string]any:
ulimit := types.UlimitsConfig{}
ulimit.Soft = value["soft"].(int)
ulimit.Hard = value["hard"].(int)
return ulimit, nil
default:
return data, errors.Errorf("invalid type %T for ulimits", value)
}
}
// LoadNetworks produces a NetworkConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadNetworks(source map[string]any, version string) (map[string]types.NetworkConfig, error) {
networks := make(map[string]types.NetworkConfig)
err := Transform(source, &networks)
if err != nil {
return networks, err
}
for name, network := range networks {
if !network.External.External {
continue
}
switch {
case network.External.Name != "":
if network.Name != "" {
return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name)
}
if versions.GreaterThanOrEqualTo(version, "3.5") {
logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name)
}
network.Name = network.External.Name
network.External.Name = ""
case network.Name == "":
network.Name = name
}
network.Extras = loadExtras(name, source)
networks[name] = network
}
return networks, nil
}
func externalVolumeError(volume, key string) error {
return errors.Errorf(
"conflicting parameters \"external\" and %q specified for volume %q",
key, volume)
}
// LoadVolumes produces a VolumeConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadVolumes(source map[string]any, version string) (map[string]types.VolumeConfig, error) {
volumes := make(map[string]types.VolumeConfig)
if err := Transform(source, &volumes); err != nil {
return volumes, err
}
for name, volume := range volumes {
if !volume.External.External {
continue
}
switch {
case volume.Driver != "":
return nil, externalVolumeError(name, "driver")
case len(volume.DriverOpts) > 0:
return nil, externalVolumeError(name, "driver_opts")
case len(volume.Labels) > 0:
return nil, externalVolumeError(name, "labels")
case volume.External.Name != "":
if volume.Name != "" {
return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name)
}
if versions.GreaterThanOrEqualTo(version, "3.4") {
logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name)
}
volume.Name = volume.External.Name
volume.External.Name = ""
case volume.Name == "":
volume.Name = name
}
volume.Extras = loadExtras(name, source)
volumes[name] = volume
}
return volumes, nil
}
// LoadSecrets produces a SecretConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadSecrets(source map[string]any, details types.ConfigDetails) (map[string]types.SecretConfig, error) {
secrets := make(map[string]types.SecretConfig)
if err := Transform(source, &secrets); err != nil {
return secrets, err
}
for name, secret := range secrets {
obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details)
if err != nil {
return nil, err
}
secretConfig := types.SecretConfig(obj)
secretConfig.Extras = loadExtras(name, source)
secrets[name] = secretConfig
}
return secrets, nil
}
// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadConfigObjs(source map[string]any, details types.ConfigDetails) (map[string]types.ConfigObjConfig, error) {
configs := make(map[string]types.ConfigObjConfig)
if err := Transform(source, &configs); err != nil {
return configs, err
}
for name, config := range configs {
obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details)
if err != nil {
return nil, err
}
configConfig := types.ConfigObjConfig(obj)
configConfig.Extras = loadExtras(name, source)
configs[name] = configConfig
}
return configs, nil
}
func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails) (types.FileObjectConfig, error) {
// if "external: true"
switch {
case obj.External.External:
// handle deprecated external.name
if obj.External.Name != "" {
if obj.Name != "" {
return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name)
}
if versions.GreaterThanOrEqualTo(details.Version, "3.5") {
logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name)
}
obj.Name = obj.External.Name
obj.External.Name = ""
} else if obj.Name == "" {
obj.Name = name
}
// if not "external: true"
case obj.Driver != "":
if obj.File != "" {
return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name)
}
default:
obj.File = absPath(details.WorkingDir, obj.File)
}
return obj, nil
}
func absPath(workingDir string, filePath string) string {
if filepath.IsAbs(filePath) {
return filePath
}
return filepath.Join(workingDir, filePath)
}
var transformMapStringString TransformerFunc = func(data any) (any, error) {
switch value := data.(type) {
case map[string]any:
return toMapStringString(value, false), nil
case map[string]string:
return value, nil
default:
return data, errors.Errorf("invalid type %T for map[string]string", value)
}
}
var transformExternal TransformerFunc = func(data any) (any, error) {
switch value := data.(type) {
case bool:
return map[string]any{"external": value}, nil
case map[string]any:
return map[string]any{"external": true, "name": value["name"]}, nil
default:
return data, errors.Errorf("invalid type %T for external", value)
}
}
var transformServicePort TransformerFunc = func(data any) (any, error) {
switch entries := data.(type) {
case []any:
// We process the list instead of individual items here.
// The reason is that one entry might be mapped to multiple ServicePortConfig.
// Therefore we take an input of a list and return an output of a list.
ports := []any{}
for _, entry := range entries {
switch value := entry.(type) {
case int:
v, err := toServicePortConfigs(strconv.Itoa(value))
if err != nil {
return data, err
}
ports = append(ports, v...)
case string:
v, err := toServicePortConfigs(value)
if err != nil {
return data, err
}
ports = append(ports, v...)
case map[string]any:
ports = append(ports, value)
default:
return data, errors.Errorf("invalid type %T for port", value)
}
}
return ports, nil
default:
return data, errors.Errorf("invalid type %T for port", entries)
}
}
var transformStringSourceMap TransformerFunc = func(data any) (any, error) {
switch value := data.(type) {
case string:
return map[string]any{"source": value}, nil
case map[string]any:
return data, nil
default:
return data, errors.Errorf("invalid type %T for secret", value)
}
}
var transformBuildConfig TransformerFunc = func(data any) (any, error) {
switch value := data.(type) {
case string:
return map[string]any{"context": value}, nil
case map[string]any:
return data, nil
default:
return data, errors.Errorf("invalid type %T for service build", value)
}
}
var transformServiceVolumeConfig TransformerFunc = func(data any) (any, error) {
switch value := data.(type) {
case string:
return ParseVolume(value)
case map[string]any:
return data, nil
default:
return data, errors.Errorf("invalid type %T for service volume", value)
}
}
var transformServiceNetworkMap TransformerFunc = func(value any) (any, error) {
if list, ok := value.([]any); ok {
mapValue := map[any]any{}
for _, name := range list {
mapValue[name] = nil
}
return mapValue, nil
}
return value, nil
}
var transformStringOrNumberList TransformerFunc = func(value any) (any, error) {
list := value.([]any)
result := make([]string, len(list))
for i, item := range list {
result[i] = fmt.Sprint(item)
}
return result, nil
}
var transformStringList TransformerFunc = func(data any) (any, error) {
switch value := data.(type) {
case string:
return []string{value}, nil
case []any:
return value, nil
default:
return data, errors.Errorf("invalid type %T for string list", value)
}
}
var transformHostsList TransformerFunc = func(data any) (any, error) {
hl := transformListOrMapping(data, ":", false, []string{"=", ":"})
// Remove brackets from IP addresses if present (for example "[::1]" -> "::1").
result := make([]string, 0, len(hl))
for _, hip := range hl {
host, ip, _ := strings.Cut(hip, ":")
if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
ip = ip[1 : len(ip)-1]
}
result = append(result, fmt.Sprintf("%s:%s", host, ip))
}
return result, nil
}
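// Worked example (example only, not upstream code): list entries using "=" are
// normalized to "host:ip" and IPv6 brackets are stripped.
func exampleHostsList() (any, error) {
	return transformHostsList([]any{"ipv6.example.com=[::1]"})
	// -> []string{"ipv6.example.com:::1"}
}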
// transformListOrMapping transforms pairs of strings that may be represented as
// a map, or a list of '=' or ':' separated strings, into a list of ':' separated
// strings.
func transformListOrMapping(listOrMapping any, sep string, allowNil bool, allowSeps []string) []string {
switch value := listOrMapping.(type) {
case map[string]any:
return toStringList(value, sep, allowNil)
case []any:
result := make([]string, 0, len(value))
for _, entry := range value {
for i, allowSep := range allowSeps {
entry := fmt.Sprint(entry)
k, v, ok := strings.Cut(entry, allowSep)
if ok {
// Entry uses this allowed separator. Add it to the result, using
// sep as a separator.
result = append(result, fmt.Sprintf("%s%s%s", k, sep, v))
break
} else if i == len(allowSeps)-1 {
// No more separators to try, keep the entry if allowNil.
if allowNil {
result = append(result, k)
}
}
}
}
return result
}
panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping))
}
func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc {
return func(data any) (any, error) {
return transformMappingOrList(data, sep, allowNil), nil
}
}
func transformMappingOrList(mappingOrList any, sep string, allowNil bool) any {
switch values := mappingOrList.(type) {
case map[string]any:
return toMapStringString(values, allowNil)
case []any:
result := make(map[string]any)
for _, v := range values {
key, val, hasValue := strings.Cut(v.(string), sep)
switch {
case !hasValue && allowNil:
result[key] = nil
case !hasValue && !allowNil:
result[key] = ""
default:
result[key] = val
}
}
return result
}
panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
}
var transformShellCommand TransformerFunc = func(value any) (any, error) {
if str, ok := value.(string); ok {
return shlex.Split(str)
}
return value, nil
}
var transformHealthCheckTest TransformerFunc = func(data any) (any, error) {
switch value := data.(type) {
case string:
return append([]string{"CMD-SHELL"}, value), nil
case []any:
return value, nil
default:
return value, errors.Errorf("invalid type %T for healthcheck.test", value)
}
}
var transformSize TransformerFunc = func(value any) (any, error) {
switch value := value.(type) {
case int:
return int64(value), nil
case string:
return units.RAMInBytes(value)
}
panic(errors.Errorf("invalid type for size %T", value))
}
var transformStringToDuration TransformerFunc = func(value any) (any, error) {
switch value := value.(type) {
case string:
d, err := time.ParseDuration(value)
if err != nil {
return value, err
}
return types.Duration(d), nil
default:
return value, errors.Errorf("invalid type %T for duration", value)
}
}
func toServicePortConfigs(value string) ([]any, error) {
var portConfigs []any
ports, portBindings, err := nat.ParsePortSpecs([]string{value})
if err != nil {
return nil, err
}
// We need to sort the keys of the ports to make sure the result is consistent
keys := []string{}
for port := range ports {
keys = append(keys, string(port))
}
sort.Strings(keys)
for _, key := range keys {
// Reuse ConvertPortToPortConfig so that it is consistent
portConfig, err := opts.ConvertPortToPortConfig(nat.Port(key), portBindings)
if err != nil {
return nil, err
}
for _, p := range portConfig {
portConfigs = append(portConfigs, types.ServicePortConfig{
Protocol: string(p.Protocol),
Target: p.TargetPort,
Published: p.PublishedPort,
Mode: string(p.PublishMode),
})
}
}
return portConfigs, nil
}
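// Worked example (example only, not upstream code): a short port spec expands
// into one ServicePortConfig per published/target pair.
func exampleServicePorts() ([]any, error) {
	return toServicePortConfigs("8080:80/tcp")
	// -> [{Mode: "ingress", Protocol: "tcp", Target: 80, Published: 8080}]
}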
func toMapStringString(value map[string]any, allowNil bool) map[string]any {
output := make(map[string]any)
for key, value := range value {
output[key] = toString(value, allowNil)
}
return output
}
func toString(value any, allowNil bool) any {
switch {
case value != nil:
return fmt.Sprint(value)
case allowNil:
return nil
default:
return ""
}
}
func toStringList(value map[string]any, separator string, allowNil bool) []string {
output := []string{}
for key, value := range value {
if value == nil && !allowNil {
continue
}
output = append(output, fmt.Sprintf("%s%s%s", key, separator, value))
}
sort.Strings(output)
return output
}


@ -0,0 +1,304 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package loader
import (
"reflect"
"sort"
"dario.cat/mergo"
"github.com/docker/cli/cli/compose/types"
"github.com/pkg/errors"
)
type specials struct {
m map[reflect.Type]func(dst, src reflect.Value) error
}
func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error {
if fn, ok := s.m[t]; ok {
return fn
}
return nil
}
func merge(configs []*types.Config) (*types.Config, error) {
base := configs[0]
for _, override := range configs[1:] {
var err error
base.Services, err = mergeServices(base.Services, override.Services)
if err != nil {
return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename)
}
base.Volumes, err = mergeVolumes(base.Volumes, override.Volumes)
if err != nil {
return base, errors.Wrapf(err, "cannot merge volumes from %s", override.Filename)
}
base.Networks, err = mergeNetworks(base.Networks, override.Networks)
if err != nil {
return base, errors.Wrapf(err, "cannot merge networks from %s", override.Filename)
}
base.Secrets, err = mergeSecrets(base.Secrets, override.Secrets)
if err != nil {
return base, errors.Wrapf(err, "cannot merge secrets from %s", override.Filename)
}
base.Configs, err = mergeConfigs(base.Configs, override.Configs)
if err != nil {
return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename)
}
}
return base, nil
}
func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) {
baseServices := mapByName(base)
overrideServices := mapByName(override)
specials := &specials{
m: map[reflect.Type]func(dst, src reflect.Value) error{
reflect.TypeOf(&types.LoggingConfig{}): safelyMerge(mergeLoggingConfig),
reflect.TypeOf([]types.ServicePortConfig{}): mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice),
reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice),
reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice),
reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig,
reflect.TypeOf([]types.ServiceVolumeConfig{}): mergeSlice(toServiceVolumeConfigsMap, toServiceVolumeConfigsSlice),
reflect.TypeOf(types.ShellCommand{}): mergeShellCommand,
reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig,
reflect.PointerTo(reflect.TypeOf(uint64(1))): mergeUint64,
},
}
for name, overrideService := range overrideServices {
overrideService := overrideService
if baseService, ok := baseServices[name]; ok {
if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil {
return base, errors.Wrapf(err, "cannot merge service %s", name)
}
baseServices[name] = baseService
continue
}
baseServices[name] = overrideService
}
services := []types.ServiceConfig{}
for _, baseService := range baseServices {
services = append(services, baseService)
}
sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name })
return services, nil
}
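// Illustrative sketch (example only, not upstream code): an override file that
// changes a service's image keeps the base service's other fields, including
// its port mappings, thanks to the field-by-field merge above.
func exampleMergeServices() ([]types.ServiceConfig, error) {
	base := []types.ServiceConfig{{
		Name:  "web",
		Image: "nginx:1.25",
		Ports: []types.ServicePortConfig{{Target: 80, Published: 8080}},
	}}
	override := []types.ServiceConfig{{Name: "web", Image: "nginx:1.27"}}
	return mergeServices(base, override)
	// -> one "web" service with Image "nginx:1.27" and the original 8080:80 port
}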
func toServiceSecretConfigsMap(s any) (map[any]any, error) {
secrets, ok := s.([]types.ServiceSecretConfig)
if !ok {
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
}
m := map[any]any{}
for _, secret := range secrets {
m[secret.Source] = secret
}
return m, nil
}
func toServiceConfigObjConfigsMap(s any) (map[any]any, error) {
secrets, ok := s.([]types.ServiceConfigObjConfig)
if !ok {
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
}
m := map[any]any{}
for _, secret := range secrets {
m[secret.Source] = secret
}
return m, nil
}
func toServicePortConfigsMap(s any) (map[any]any, error) {
ports, ok := s.([]types.ServicePortConfig)
if !ok {
return nil, errors.Errorf("not a servicePortConfig slice: %v", s)
}
m := map[any]any{}
for _, p := range ports {
m[p.Published] = p
}
return m, nil
}
func toServiceVolumeConfigsMap(s any) (map[any]any, error) {
volumes, ok := s.([]types.ServiceVolumeConfig)
if !ok {
return nil, errors.Errorf("not a serviceVolumeConfig slice: %v", s)
}
m := map[any]any{}
for _, v := range volumes {
m[v.Target] = v
}
return m, nil
}
func toServiceSecretConfigsSlice(dst reflect.Value, m map[any]any) error {
s := []types.ServiceSecretConfig{}
for _, v := range m {
s = append(s, v.(types.ServiceSecretConfig))
}
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
dst.Set(reflect.ValueOf(s))
return nil
}
func toServiceConfigObjConfigsSlice(dst reflect.Value, m map[any]any) error {
s := []types.ServiceConfigObjConfig{}
for _, v := range m {
s = append(s, v.(types.ServiceConfigObjConfig))
}
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
dst.Set(reflect.ValueOf(s))
return nil
}
func toServicePortConfigsSlice(dst reflect.Value, m map[any]any) error {
s := []types.ServicePortConfig{}
for _, v := range m {
s = append(s, v.(types.ServicePortConfig))
}
sort.Slice(s, func(i, j int) bool { return s[i].Published < s[j].Published })
dst.Set(reflect.ValueOf(s))
return nil
}
func toServiceVolumeConfigsSlice(dst reflect.Value, m map[any]any) error {
s := []types.ServiceVolumeConfig{}
for _, v := range m {
s = append(s, v.(types.ServiceVolumeConfig))
}
sort.Slice(s, func(i, j int) bool { return s[i].Target < s[j].Target })
dst.Set(reflect.ValueOf(s))
return nil
}
type (
tomapFn func(s any) (map[any]any, error)
writeValueFromMapFn func(reflect.Value, map[any]any) error
)
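// safelyMerge guards a custom merge function against nil pointers: a nil src
// leaves dst untouched, a nil dst simply adopts src, and mergeFn only runs
// when both sides are set.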
func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error {
return func(dst, src reflect.Value) error {
if src.IsNil() {
return nil
}
if dst.IsNil() {
dst.Set(src)
return nil
}
return mergeFn(dst, src)
}
}
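// mergeSlice builds a merge function for a slice type: both slices are
// converted to maps with tomap, merged with override semantics, and the
// result is written back to dst as a sorted slice by writeValue.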
func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
return func(dst, src reflect.Value) error {
dstMap, err := sliceToMap(tomap, dst)
if err != nil {
return err
}
srcMap, err := sliceToMap(tomap, src)
if err != nil {
return err
}
if err := mergo.Map(&dstMap, srcMap, mergo.WithOverride); err != nil {
return err
}
return writeValue(dst, dstMap)
}
}
func sliceToMap(tomap tomapFn, v reflect.Value) (map[any]any, error) {
// the value must be a valid (non-zero) reflect.Value before it can be converted
if !v.IsValid() {
return nil, errors.Errorf("invalid value: %+v", v)
}
return tomap(v.Interface())
}
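// mergeLoggingConfig merges logging options when both configs use the same
// driver, or when either driver is unset; if the drivers differ, the override
// replaces the base configuration entirely.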
func mergeLoggingConfig(dst, src reflect.Value) error {
// Same driver, merging options
if getLoggingDriver(dst.Elem()) == getLoggingDriver(src.Elem()) ||
getLoggingDriver(dst.Elem()) == "" || getLoggingDriver(src.Elem()) == "" {
if getLoggingDriver(dst.Elem()) == "" {
dst.Elem().FieldByName("Driver").SetString(getLoggingDriver(src.Elem()))
}
dstOptions := dst.Elem().FieldByName("Options").Interface().(map[string]string)
srcOptions := src.Elem().FieldByName("Options").Interface().(map[string]string)
return mergo.Merge(&dstOptions, srcOptions, mergo.WithOverride)
}
// Different driver, override with src
dst.Set(src)
return nil
}
//nolint:unparam
func mergeUlimitsConfig(dst, src reflect.Value) error {
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
dst.Elem().Set(src.Elem())
}
return nil
}
//nolint:unparam
func mergeShellCommand(dst, src reflect.Value) error {
if src.Len() != 0 {
dst.Set(src)
}
return nil
}
//nolint:unparam
func mergeServiceNetworkConfig(dst, src reflect.Value) error {
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases"))
if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" {
dst.Elem().FieldByName("Ipv4Address").SetString(ipv4)
}
if ipv6 := src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" {
dst.Elem().FieldByName("Ipv6Address").SetString(ipv6)
}
}
return nil
}
//nolint:unparam
func mergeUint64(dst, src reflect.Value) error {
if !src.IsNil() {
dst.Elem().Set(src.Elem())
}
return nil
}
func getLoggingDriver(v reflect.Value) string {
return v.FieldByName("Driver").String()
}
func mapByName(services []types.ServiceConfig) map[string]types.ServiceConfig {
m := map[string]types.ServiceConfig{}
for _, service := range services {
m[service.Name] = service
}
return m
}
func mergeVolumes(base, override map[string]types.VolumeConfig) (map[string]types.VolumeConfig, error) {
err := mergo.Map(&base, &override, mergo.WithOverride)
return base, err
}
func mergeNetworks(base, override map[string]types.NetworkConfig) (map[string]types.NetworkConfig, error) {
err := mergo.Map(&base, &override, mergo.WithOverride)
return base, err
}
func mergeSecrets(base, override map[string]types.SecretConfig) (map[string]types.SecretConfig, error) {
err := mergo.Map(&base, &override, mergo.WithOverride)
return base, err
}
func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]types.ConfigObjConfig, error) {
err := mergo.Map(&base, &override, mergo.WithOverride)
return base, err
}

View File

@ -0,0 +1,125 @@
package loader
import (
"strings"
"unicode"
"unicode/utf8"
"github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/mount"
"github.com/pkg/errors"
)
const endOfSpec = rune(0)
// ParseVolume parses a volume spec without any knowledge of the target platform
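//
// For example (illustrative):
//
//	v, _ := ParseVolume("./data:/var/lib/data:ro")
//	// v.Type == "bind", v.Source == "./data", v.Target == "/var/lib/data", v.ReadOnly == true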
func ParseVolume(spec string) (types.ServiceVolumeConfig, error) {
volume := types.ServiceVolumeConfig{}
switch len(spec) {
case 0:
return volume, errors.New("invalid empty volume spec")
case 1, 2:
volume.Target = spec
volume.Type = string(mount.TypeVolume)
return volume, nil
}
buffer := []rune{}
for _, char := range spec + string(endOfSpec) {
switch {
case isWindowsDrive(buffer, char):
buffer = append(buffer, char)
case char == ':' || char == endOfSpec:
if err := populateFieldFromBuffer(char, buffer, &volume); err != nil {
populateType(&volume)
return volume, errors.Wrapf(err, "invalid spec: %s", spec)
}
buffer = []rune{}
default:
buffer = append(buffer, char)
}
}
populateType(&volume)
return volume, nil
}
func isWindowsDrive(buffer []rune, char rune) bool {
return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0])
}
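// populateFieldFromBuffer assigns the text accumulated between colons to the
// next unset field: the target for a single-section (anonymous) spec,
// otherwise source then target. A trailing section is parsed as a
// comma-separated list of options (ro, rw, nocopy, or a bind propagation
// mode); unknown options are ignored.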
func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error {
strBuffer := string(buffer)
switch {
case len(buffer) == 0:
return errors.New("empty section between colons")
// Anonymous volume
case volume.Source == "" && char == endOfSpec:
volume.Target = strBuffer
return nil
case volume.Source == "":
volume.Source = strBuffer
return nil
case volume.Target == "":
volume.Target = strBuffer
return nil
case char == ':':
return errors.New("too many colons")
}
for _, option := range strings.Split(strBuffer, ",") {
switch option {
case "ro":
volume.ReadOnly = true
case "rw":
volume.ReadOnly = false
case "nocopy":
volume.Volume = &types.ServiceVolumeVolume{NoCopy: true}
default:
if isBindOption(option) {
volume.Bind = &types.ServiceVolumeBind{Propagation: option}
}
// ignore unknown options
}
}
return nil
}
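// isBindOption reports whether option is one of the bind propagation modes
// known to the mount package.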
func isBindOption(option string) bool {
for _, propagation := range mount.Propagations {
if mount.Propagation(option) == propagation {
return true
}
}
return false
}
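// populateType infers the mount type: an empty source means an anonymous
// volume, a path-like source means a bind mount, and anything else is a
// named volume.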
func populateType(volume *types.ServiceVolumeConfig) {
switch {
// Anonymous volume
case volume.Source == "":
volume.Type = string(mount.TypeVolume)
case isFilePath(volume.Source):
volume.Type = string(mount.TypeBind)
default:
volume.Type = string(mount.TypeVolume)
}
}
func isFilePath(source string) bool {
switch source[0] {
case '.', '/', '~':
return true
}
if len([]rune(source)) == 1 {
return false
}
// windows named pipes
if strings.HasPrefix(source, `\\`) {
return true
}
first, nextIndex := utf8.DecodeRuneInString(source)
return isWindowsDrive([]rune{first}, rune(source[nextIndex]))
}

View File

@ -0,0 +1,67 @@
package loader
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// https://github.com/golang/go/blob/master/LICENSE
// This file contains utilities to check for Windows absolute paths on Linux.
// The code in this file was largely copied from the Golang filepath package
// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65
func isSlash(c uint8) bool {
return c == '\\' || c == '/'
}
// isAbs reports whether the path is a Windows absolute path.
func isAbs(path string) (b bool) {
l := volumeNameLen(path)
if l == 0 {
return false
}
path = path[l:]
if path == "" {
return false
}
return isSlash(path[0])
}
// volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere.
//
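// For example (illustrative):
//
//	volumeNameLen(`C:\foo`)           // 2
//	volumeNameLen(`\\host\share\dir`) // len(`\\host\share`)
//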
//nolint:gocyclo
func volumeNameLen(path string) int {
if len(path) < 2 {
return 0
}
// with drive letter
c := path[0]
if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
return 2
}
// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
!isSlash(path[2]) && path[2] != '.' {
// first, the leading `\\` must be followed by something other than `\`; this is the server name.
for n := 3; n < l-1; n++ {
// second, next '\' shouldn't be repeated.
if isSlash(path[n]) {
n++
// third, at least one more character must follow; this is the share name.
if !isSlash(path[n]) {
if path[n] == '.' {
break
}
for ; n < l; n++ {
if isSlash(path[n]) {
break
}
}
return n
}
break
}
}
}
return 0
}

View File

@ -0,0 +1,384 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.0.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
}
},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"container_name": {"type": "string"},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "ports"
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"working_dir": {"type": "string"}
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {"$ref": "#/definitions/resource"},
"reservations": {"$ref": "#/definitions/resource"}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"resource": {
"id": "#/definitions/resource",
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

View File

@ -0,0 +1,429 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.1.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
}
},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"container_name": {"type": "string"},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "ports"
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"working_dir": {"type": "string"}
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {"$ref": "#/definitions/resource"},
"reservations": {"$ref": "#/definitions/resource"}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"resource": {
"id": "#/definitions/resource",
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

View File

@ -0,0 +1,672 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.10.json",
"type": "object",
"properties": {
"version": {
"type": "string",
"default": "3.10"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroupns_mode": {"type": "string"},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"config": {"type": "string"},
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"pids": {"type": "integer"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
},
"max_replicas_per_node": {"type": "integer"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"x-cluster-spec": {
"type": "object",
"properties": {
"group": {"type": "string"},
"access_mode": {
"type": "object",
"properties": {
"scope": {"type": "string"},
"sharing": {"type": "string"},
"block_volume": {"type": "object"},
"mount_volume": {
"type": "object",
"properties": {
"fs_type": {"type": "string"},
"mount_flags": {"type": "array", "items": {"type": "string"}}
}
}
}
},
"accessibility_requirements": {
"type": "object",
"properties": {
"requisite": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
},
"preferred": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
}
}
},
"capacity_range": {
"type": "object",
"properties": {
"required_bytes": {"type": "string"},
"limit_bytes": {"type": "string"}
}
},
"availability": {"type": "string"}
}
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

View File

@ -0,0 +1,672 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.11.json",
"type": "object",
"properties": {
"version": {
"type": "string",
"default": "3.11"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": true
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroupns_mode": {"type": "string"},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"config": {"type": "string"},
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"pids": {"type": "integer"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
},
"max_replicas_per_node": {"type": "integer"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"x-cluster-spec": {
"type": "object",
"properties": {
"group": {"type": "string"},
"access_mode": {
"type": "object",
"properties": {
"scope": {"type": "string"},
"sharing": {"type": "string"},
"block_volume": {"type": "object"},
"mount_volume": {
"type": "object",
"properties": {
"fs_type": {"type": "string"},
"mount_flags": {"type": "array", "items": {"type": "string"}}
}
}
}
},
"accessibility_requirements": {
"type": "object",
"properties": {
"requisite": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
},
"preferred": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
}
}
},
"capacity_range": {
"type": "object",
"properties": {
"required_bytes": {"type": "string"},
"limit_bytes": {"type": "string"}
}
},
"availability": {"type": "string"}
}
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

View File

@ -0,0 +1,673 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.12.json",
"type": "object",
"properties": {
"version": {
"type": "string",
"default": "3.12"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": true
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroupns_mode": {"type": "string"},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"config": {"type": "string"},
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"},
"start_interval": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"pids": {"type": "integer"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
},
"max_replicas_per_node": {"type": "integer"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"x-cluster-spec": {
"type": "object",
"properties": {
"group": {"type": "string"},
"access_mode": {
"type": "object",
"properties": {
"scope": {"type": "string"},
"sharing": {"type": "string"},
"block_volume": {"type": "object"},
"mount_volume": {
"type": "object",
"properties": {
"fs_type": {"type": "string"},
"mount_flags": {"type": "array", "items": {"type": "string"}}
}
}
}
},
"accessibility_requirements": {
"type": "object",
"properties": {
"requisite": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
},
"preferred": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
}
}
},
"capacity_range": {
"type": "object",
"properties": {
"required_bytes": {"type": "string"},
"limit_bytes": {"type": "string"}
}
},
"availability": {"type": "string"}
}
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

@ -0,0 +1,679 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.13.json",
"type": "object",
"properties": {
"version": {
"type": "string",
"default": "3.13"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": true
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroupns_mode": {"type": "string"},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"config": {"type": "string"},
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": { "type": ["string", "number"] }
}
},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"},
"start_interval": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"pids": {"type": "integer"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
},
"max_replicas_per_node": {"type": "integer"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"x-cluster-spec": {
"type": "object",
"properties": {
"group": {"type": "string"},
"access_mode": {
"type": "object",
"properties": {
"scope": {"type": "string"},
"sharing": {"type": "string"},
"block_volume": {"type": "object"},
"mount_volume": {
"type": "object",
"properties": {
"fs_type": {"type": "string"},
"mount_flags": {"type": "array", "items": {"type": "string"}}
}
}
}
},
"accessibility_requirements": {
"type": "object",
"properties": {
"requisite": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
},
"preferred": {
"type": "array",
"items": {
"type": "object",
"properties": {
"segments": {"$ref": "#/definitions/list_or_dict"}
}
}
}
}
},
"capacity_range": {
"type": "object",
"properties": {
"required_bytes": {"type": "string"},
"limit_bytes": {"type": "string"}
}
},
"availability": {"type": "string"}
}
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}
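
As an illustrative aside (not part of the vendored data): the sketch below validates a minimal Compose document against the config_schema_v3.13.json definition above, using github.com/xeipuuv/gojsonschema, a Go implementation of JSON Schema draft-04. The schema path, service name, and image are placeholder assumptions.

package main

import (
	"fmt"
	"os"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Read the vendored schema; the path is a placeholder for wherever
	// config_schema_v3.13.json sits in your checkout.
	schemaBytes, err := os.ReadFile("config_schema_v3.13.json")
	if err != nil {
		panic(err)
	}

	// A minimal Compose document written as JSON (YAML is a superset of
	// JSON); the service name and image are illustrative only.
	doc := `{
	  "version": "3.13",
	  "services": {
	    "web": {
	      "image": "nginx:alpine",
	      "deploy": {"replicas": 2}
	    }
	  }
	}`

	result, err := gojsonschema.Validate(
		gojsonschema.NewStringLoader(string(schemaBytes)),
		gojsonschema.NewStringLoader(doc),
	)
	if err != nil {
		panic(err)
	}
	if result.Valid() {
		fmt.Println("document conforms to config_schema_v3.13.json")
		return
	}
	for _, e := range result.Errors() {
		fmt.Println("-", e)
	}
}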

@ -0,0 +1,476 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.2.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
}
},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"container_name": {"type": "string"},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {"$ref": "#/definitions/resource"},
"reservations": {"$ref": "#/definitions/resource"}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"resource": {
"id": "#/definitions/resource",
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}
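
One more hedged sketch: the schemas annotate several fields with nonstandard "format" values ("ports", "expose", "duration"), which a JSON Schema validator only enforces once a checker is registered under each name. The package name and the use of time.ParseDuration below are assumptions for illustration, not the CLI's actual implementation.

// Package schemacheck is a hypothetical helper package; the name is not
// part of docker/cli.
package schemacheck

import (
	"time"

	"github.com/xeipuuv/gojsonschema"
)

// durationChecker accepts Go-style duration strings such as "30s" or "1m30s".
// time.ParseDuration is an approximation of the compose duration syntax.
type durationChecker struct{}

func (durationChecker) IsFormat(input interface{}) bool {
	s, ok := input.(string)
	if !ok {
		// Non-string values are left to the schema's "type" keyword.
		return true
	}
	_, err := time.ParseDuration(s)
	return err == nil
}

// permissiveChecker accepts any value; a real implementation would parse
// port and expose specifications.
type permissiveChecker struct{}

func (permissiveChecker) IsFormat(interface{}) bool { return true }

func init() {
	// Register checkers for the custom formats referenced by the schemas.
	gojsonschema.FormatCheckers.Add("duration", durationChecker{})
	gojsonschema.FormatCheckers.Add("ports", permissiveChecker{})
	gojsonschema.FormatCheckers.Add("expose", permissiveChecker{})
}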

@ -0,0 +1,540 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.3.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {"$ref": "#/definitions/resource"},
"reservations": {"$ref": "#/definitions/resource"}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"resource": {
"id": "#/definitions/resource",
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

@ -0,0 +1,548 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.4.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {"$ref": "#/definitions/resource"},
"reservations": {"$ref": "#/definitions/resource"}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"resource": {
"id": "#/definitions/resource",
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

@ -0,0 +1,577 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.5.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}
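
These schema documents are plain JSON Schema draft-04, so a decoded Compose map can be checked with any draft-04 validator. The sketch below uses gojsonschema directly; the on-disk schema path is an assumption for the example (it mirrors the data/config_schema_v<version>.json naming used by the Go loader further down in this commit), and it shows "environment" being accepted in both forms allowed by the list_or_dict definition above.

// Sketch only: validate a Compose-style map against one of these schema
// documents with gojsonschema. The schema path below is assumed, not part
// of the vendored code.
package main

import (
    "fmt"
    "os"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    raw, err := os.ReadFile("data/config_schema_v3.6.json") // assumed location
    if err != nil {
        panic(err)
    }

    // "environment" may be a mapping or a list of "KEY=value" strings
    // (the list_or_dict definition); both services below validate.
    cfg := map[string]any{
        "version": "3.6",
        "services": map[string]any{
            "web": map[string]any{
                "image":       "nginx:alpine",
                "environment": map[string]any{"DEBUG": "1"},
            },
            "worker": map[string]any{
                "image":       "busybox",
                "environment": []any{"DEBUG=1"},
            },
        },
    }

    result, err := gojsonschema.Validate(
        gojsonschema.NewStringLoader(string(raw)),
        gojsonschema.NewGoLoader(cfg),
    )
    if err != nil {
        panic(err)
    }
    fmt.Println("valid:", result.Valid())
}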


@ -0,0 +1,586 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.6.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}


@ -0,0 +1,606 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.7.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}
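
New in this 3.7 schema are the "^x-" patternProperties entries on the service, network, volume, secret, and config definitions (in addition to the top level), so extension keys beginning with "x-" now pass validation wherever those definitions apply. A short sketch using the Validate helper from the schema package vendored later in this commit; the import path is assumed from the vendor layout.

// Sketch only: "x-" extension keys are accepted by the 3.7 schema both at the
// top level and inside a service definition. Import path assumed.
package main

import (
    "fmt"

    "github.com/docker/cli/cli/compose/schema"
)

func main() {
    cfg := map[string]any{
        "version": "3.7",
        "x-owner": "platform-team", // top-level extension key
        "services": map[string]any{
            "web": map[string]any{
                "image":      "nginx:alpine",
                "x-internal": true, // service-level extension key
            },
        },
    }
    fmt.Println(schema.Validate(cfg, "3.7")) // expected: <nil>
}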


@ -0,0 +1,617 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.8.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"config": {"type": "string"},
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
},
"max_replicas_per_node": {"type": "integer"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}


@ -0,0 +1,620 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v3.9.json",
"type": "object",
"required": ["version"],
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroupns_mode": {"type": "string"},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"container_name": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"config": {"type": "string"},
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false
},
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
}
}
},
"additionalProperties": false
}
],
"uniqueItems": true
}
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"pids": {"type": "integer"}
},
"additionalProperties": false
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": "string"},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false
}
},
"max_replicas_per_node": {"type": "integer"}
},
"additionalProperties": false
}
},
"additionalProperties": false
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"template_driver": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}
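
The practical difference between these near-identical documents is which keys each version admits: for instance, cgroupns_mode and the pids limit under deploy.resources appear only in this 3.9 schema, and because every service definition sets additionalProperties to false, a file that uses them validates under "3.9" but is rejected under "3.8". A sketch, again assuming the vendored schema package's import path.

// Sketch only: the same config passes the 3.9 schema but fails 3.8, where
// cgroupns_mode is not a known service property. Import path assumed.
package main

import (
    "fmt"

    "github.com/docker/cli/cli/compose/schema"
)

func main() {
    cfg := map[string]any{
        "version": "3.9",
        "services": map[string]any{
            "web": map[string]any{
                "image":         "nginx:alpine",
                "cgroupns_mode": "private",
            },
        },
    }
    fmt.Println(schema.Validate(cfg, "3.9")) // expected: <nil>
    fmt.Println(schema.Validate(cfg, "3.8")) // expected: a validation error for cgroupns_mode
}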


@ -0,0 +1,180 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21

package schema

import (
    "embed"
    "fmt"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/xeipuuv/gojsonschema"
)

const (
    defaultVersion = "3.13"
    versionField   = "version"
)

type portsFormatChecker struct{}

func (checker portsFormatChecker) IsFormat(_ any) bool {
    // TODO: implement this
    return true
}

type durationFormatChecker struct{}

func (checker durationFormatChecker) IsFormat(input any) bool {
    value, ok := input.(string)
    if !ok {
        return false
    }
    _, err := time.ParseDuration(value)
    return err == nil
}

func init() {
    gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
    gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
    gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
}

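The "expose" and "ports" checkers accept any value for now (per the TODO above), while the "duration" checker defers to time.ParseDuration, so schema fields marked with "format": "duration" must use Go duration syntax. An illustration of which values pass that check:

// Illustration only: the duration format checker above simply delegates to
// time.ParseDuration, so Go duration syntax is required.
package main

import (
    "fmt"
    "time"
)

func main() {
    for _, v := range []string{"1m30s", "500ms", "90", "ninety seconds"} {
        _, err := time.ParseDuration(v)
        fmt.Printf("%-17q valid: %v\n", v, err == nil) // "1m30s" and "500ms" pass; the others do not
    }
}
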
// Version returns the version of the config, defaulting to the latest "3.x"
// version (3.13). If only the major version "3" is specified, it is used as
// version "3.x" and returns the default version (latest 3.x).
func Version(config map[string]any) string {
    version, ok := config[versionField]
    if !ok {
        return defaultVersion
    }
    return normalizeVersion(fmt.Sprintf("%v", version))
}

func normalizeVersion(version string) string {
    switch version {
    case "", "3":
        return defaultVersion
    default:
        return version
    }
}

//go:embed data/config_schema_v*.json
var schemas embed.FS

// Validate uses the jsonschema to validate the configuration
func Validate(config map[string]any, version string) error {
    version = normalizeVersion(version)
    schemaData, err := schemas.ReadFile("data/config_schema_v" + version + ".json")
    if err != nil {
        return errors.Errorf("unsupported Compose file version: %s", version)
    }
    schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
    dataLoader := gojsonschema.NewGoLoader(config)
    result, err := gojsonschema.Validate(schemaLoader, dataLoader)
    if err != nil {
        return err
    }
    if !result.Valid() {
        return toError(result)
    }
    return nil
}

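Version and Validate are the intended entry points: decode the Compose YAML into a map, let Version pick the schema revision (a missing field or a bare "3" normalizes to the default, 3.13), and pass both to Validate, which loads the matching embedded data/config_schema_v<version>.json. A minimal sketch; the import path is assumed from the vendor layout, and it expects the 3.13 schema to ship in the embedded data directory.

// Sketch only: end-to-end use of Version and Validate. Import path assumed.
package main

import (
    "fmt"

    "github.com/docker/cli/cli/compose/schema"
)

func main() {
    cfg := map[string]any{
        "version": "3", // bare major version: Version normalizes it to the default 3.x
        "services": map[string]any{
            "web": map[string]any{"image": "nginx:alpine"},
        },
    }

    v := schema.Version(cfg) // expected: "3.13"
    if err := schema.Validate(cfg, v); err != nil {
        fmt.Println("invalid:", err)
        return
    }
    fmt.Println("valid against schema", v)
}
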
func toError(result *gojsonschema.Result) error {
    err := getMostSpecificError(result.Errors())
    return err
}

const (
    jsonschemaOneOf = "number_one_of"
    jsonschemaAnyOf = "number_any_of"
)

func getDescription(err validationError) string {
    switch err.parent.Type() {
    case "invalid_type":
        if expectedType, ok := err.parent.Details()["expected"].(string); ok {
            return "must be a " + humanReadableType(expectedType)
        }
    case jsonschemaOneOf, jsonschemaAnyOf:
        if err.child == nil {
            return err.parent.Description()
        }
        return err.child.Description()
    }
    return err.parent.Description()
}

func humanReadableType(definition string) string {
    if definition[0:1] == "[" {
        allTypes := strings.Split(definition[1:len(definition)-1], ",")
        for i, t := range allTypes {
            allTypes[i] = humanReadableType(t)
        }
        return fmt.Sprintf(
            "%s or %s",
            strings.Join(allTypes[0:len(allTypes)-1], ", "),
            allTypes[len(allTypes)-1],
        )
    }
    if definition == "object" {
        return "mapping"
    }
    if definition == "array" {
        return "list"
    }
    return definition
}

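humanReadableType rewrites gojsonschema's type names into the vocabulary used in Compose error messages: "object" becomes "mapping", "array" becomes "list", anything else passes through, and a bracketed list such as "[string, number]" is joined with "or". Since the helper is unexported, a sketch of its behaviour would sit in an in-package test (the test file is hypothetical, not part of the vendored code):

// Hypothetical in-package test sketching humanReadableType's mapping.
package schema

import "testing"

func TestHumanReadableType(t *testing.T) {
    cases := map[string]string{
        "object": "mapping", // JSON "object" is reported as a mapping
        "array":  "list",    // JSON "array" is reported as a list
        "string": "string",  // other scalar types pass through unchanged
    }
    for in, want := range cases {
        if got := humanReadableType(in); got != want {
            t.Errorf("humanReadableType(%q) = %q, want %q", in, got, want)
        }
    }
}
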
type validationError struct {
    parent gojsonschema.ResultError
    child  gojsonschema.ResultError
}

func (err validationError) Error() string {
    description := getDescription(err)
    return fmt.Sprintf("%s %s", err.parent.Field(), description)
}

func getMostSpecificError(errs []gojsonschema.ResultError) validationError {
    mostSpecificError := 0
    for i, err := range errs {
        if specificity(err) > specificity(errs[mostSpecificError]) {
            mostSpecificError = i
            continue
        }
        if specificity(err) == specificity(errs[mostSpecificError]) {
            // Invalid type errors win in a tie-breaker for most specific field name
            if err.Type() == "invalid_type" && errs[mostSpecificError].Type() != "invalid_type" {
                mostSpecificError = i
            }
        }
    }
    if mostSpecificError+1 == len(errs) {
        return validationError{parent: errs[mostSpecificError]}
    }
    switch errs[mostSpecificError].Type() {
    case "number_one_of", "number_any_of":
        return validationError{
            parent: errs[mostSpecificError],
            child:  errs[mostSpecificError+1],
        }
    default:
        return validationError{parent: errs[mostSpecificError]}
    }
}

func specificity(err gojsonschema.ResultError) int {
    return len(strings.Split(err.Field(), "."))
}

View File

@ -0,0 +1,248 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package template
import (
"fmt"
"regexp"
"strings"
)
const (
delimiter = "\\$"
subst = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?"
)
var defaultPattern = regexp.MustCompile(fmt.Sprintf(
"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
delimiter, delimiter, subst, subst,
))
// DefaultSubstituteFuncs contains the default SubstituteFuncs used by the docker cli
var DefaultSubstituteFuncs = []SubstituteFunc{
softDefault,
hardDefault,
requiredNonEmpty,
required,
}
// InvalidTemplateError is returned when a variable template is not in a valid
// format
type InvalidTemplateError struct {
Template string
}
func (e InvalidTemplateError) Error() string {
return fmt.Sprintf("Invalid template: %#v", e.Template)
}
// Mapping is a user-supplied function which maps from variable names to values.
// Returns the value as a string and a bool indicating whether
// the value is present, to distinguish between an empty string
// and the absence of a value.
type Mapping func(string) (string, bool)
// SubstituteFunc is a user-supplied function that applies a substitution.
// It returns the substituted value as a string, a bool indicating whether the
// function could apply the substitution, and an error.
type SubstituteFunc func(string, Mapping) (string, bool, error)
// SubstituteWith substitutes variables in the string with their values.
// It accepts additional substitute functions.
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
var err error
result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
matches := pattern.FindStringSubmatch(substring)
groups := matchGroups(matches, pattern)
if escaped := groups["escaped"]; escaped != "" {
return escaped
}
substitution := groups["named"]
if substitution == "" {
substitution = groups["braced"]
}
if substitution == "" {
err = &InvalidTemplateError{Template: template}
return ""
}
for _, f := range subsFuncs {
var (
value string
applied bool
)
value, applied, err = f(substitution, mapping)
if err != nil {
return ""
}
if !applied {
continue
}
return value
}
value, _ := mapping(substitution)
return value
})
return result, err
}
// Substitute variables in the string with their values
func Substitute(template string, mapping Mapping) (string, error) {
return SubstituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...)
}
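// Illustrative sketch (not part of the upstream file): substituting variables
// from a map-backed Mapping. The "env" map and the template string are
// assumptions for demonstration only.
func exampleSubstitute() (string, error) {
    env := map[string]string{"TAG": "1.2.3"}
    return Substitute("app:${TAG:-latest}", func(name string) (string, bool) {
        value, ok := env[name]
        return value, ok
    })
    // yields "app:1.2.3"; an unset TAG would fall back to "latest"
}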
// ExtractVariables returns a map of all the variables defined in the specified
// composefile (dict representation) and their default value if any.
func ExtractVariables(configDict map[string]any, pattern *regexp.Regexp) map[string]string {
if pattern == nil {
pattern = defaultPattern
}
return recurseExtract(configDict, pattern)
}
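// Illustrative sketch (not part of the upstream file): extracting variable
// names and their defaults from a decoded service definition. The input map
// is an assumption for demonstration only.
func exampleExtractVariables() map[string]string {
    cfg := map[string]any{
        "services": map[string]any{
            "web": map[string]any{"image": "app:${TAG:-latest}"},
        },
    }
    return ExtractVariables(cfg, nil) // map[string]string{"TAG": "latest"}
}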
func recurseExtract(value any, pattern *regexp.Regexp) map[string]string {
m := map[string]string{}
switch value := value.(type) {
case string:
if values, is := extractVariable(value, pattern); is {
for _, v := range values {
m[v.name] = v.value
}
}
case map[string]any:
for _, elem := range value {
submap := recurseExtract(elem, pattern)
for key, value := range submap {
m[key] = value
}
}
case []any:
for _, elem := range value {
if values, is := extractVariable(elem, pattern); is {
for _, v := range values {
m[v.name] = v.value
}
}
}
}
return m
}
type extractedValue struct {
name string
value string
}
func extractVariable(value any, pattern *regexp.Regexp) ([]extractedValue, bool) {
sValue, ok := value.(string)
if !ok {
return []extractedValue{}, false
}
matches := pattern.FindAllStringSubmatch(sValue, -1)
if len(matches) == 0 {
return []extractedValue{}, false
}
values := []extractedValue{}
for _, match := range matches {
groups := matchGroups(match, pattern)
if escaped := groups["escaped"]; escaped != "" {
continue
}
val := groups["named"]
if val == "" {
val = groups["braced"]
}
name := val
var defaultValue string
switch {
case strings.Contains(val, ":?"):
name, _ = partition(val, ":?")
case strings.Contains(val, "?"):
name, _ = partition(val, "?")
case strings.Contains(val, ":-"):
name, defaultValue = partition(val, ":-")
case strings.Contains(val, "-"):
name, defaultValue = partition(val, "-")
}
values = append(values, extractedValue{name: name, value: defaultValue})
}
return values, len(values) > 0
}
// Soft default (fall back if unset or empty)
func softDefault(substitution string, mapping Mapping) (string, bool, error) {
sep := ":-"
if !strings.Contains(substitution, sep) {
return "", false, nil
}
name, defaultValue := partition(substitution, sep)
value, ok := mapping(name)
if !ok || value == "" {
return defaultValue, true, nil
}
return value, true, nil
}
// Hard default (fall back only if unset)
func hardDefault(substitution string, mapping Mapping) (string, bool, error) {
sep := "-"
if !strings.Contains(substitution, sep) {
return "", false, nil
}
name, defaultValue := partition(substitution, sep)
value, ok := mapping(name)
if !ok {
return defaultValue, true, nil
}
return value, true, nil
}
func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) {
return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" })
}
func required(substitution string, mapping Mapping) (string, bool, error) {
return withRequired(substitution, mapping, "?", func(_ string) bool { return true })
}
func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) {
if !strings.Contains(substitution, sep) {
return "", false, nil
}
name, errorMessage := partition(substitution, sep)
value, ok := mapping(name)
if !ok || !valid(value) {
return "", true, &InvalidTemplateError{
Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage),
}
}
return value, true, nil
}
func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string {
groups := make(map[string]string)
for i, name := range pattern.SubexpNames()[1:] {
groups[name] = matches[i+1]
}
return groups
}
// Split the string at the first occurrence of sep, and return the part before the separator,
// and the part after the separator.
//
// If the separator is not found, return the string itself, followed by an empty string.
func partition(s, sep string) (string, string) {
k, v, ok := strings.Cut(s, sep)
if !ok {
return s, ""
}
return k, v
}
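// Illustrative sketch (not part of the upstream file): how partition splits
// the body of a ${VAR:-default} expression.
func examplePartition() (string, string) {
    return partition("TAG:-latest", ":-") // "TAG", "latest"
}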

601
vendor/github.com/docker/cli/cli/compose/types/types.go generated vendored Normal file
View File

@ -0,0 +1,601 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
package types
import (
"encoding/json"
"fmt"
"strconv"
"time"
)
// UnsupportedProperties not yet supported by this implementation of the compose file
var UnsupportedProperties = []string{
"build",
"cgroupns_mode",
"cgroup_parent",
"devices",
"domainname",
"external_links",
"ipc",
"links",
"mac_address",
"network_mode",
"pid",
"privileged",
"restart",
"security_opt",
"shm_size",
"userns_mode",
}
// DeprecatedProperties that were removed from the v3 format, but their
// use should not impact the behaviour of the application.
var DeprecatedProperties = map[string]string{
"container_name": "Setting the container name is not supported.",
"expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.",
}
// ForbiddenProperties that are not supported in this implementation of the
// compose file.
var ForbiddenProperties = map[string]string{
"extends": "Support for `extends` is not implemented yet.",
"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.",
"volumes_from": "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.",
"cpu_quota": "Set resource limits using deploy.resources",
"cpu_shares": "Set resource limits using deploy.resources",
"cpuset": "Set resource limits using deploy.resources",
"mem_limit": "Set resource limits using deploy.resources",
"memswap_limit": "Set resource limits using deploy.resources",
}
// ConfigFile is a filename and the contents of the file as a Dict
type ConfigFile struct {
Filename string
Config map[string]any
}
// ConfigDetails are the details about a group of ConfigFiles
type ConfigDetails struct {
Version string
WorkingDir string
ConfigFiles []ConfigFile
Environment map[string]string
}
// Duration is a thin wrapper around time.Duration with improved JSON marshalling
type Duration time.Duration
func (d Duration) String() string {
return time.Duration(d).String()
}
// ConvertDurationPtr converts a Duration pointer (this package's type) to a time.Duration pointer with the same value.
func ConvertDurationPtr(d *Duration) *time.Duration {
if d == nil {
return nil
}
res := time.Duration(*d)
return &res
}
// MarshalJSON makes Duration implement json.Marshaler
func (d Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.String())
}
// MarshalYAML makes Duration implement yaml.Marshaler
func (d Duration) MarshalYAML() (any, error) {
return d.String(), nil
}
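// Illustrative sketch (not part of the upstream file): Duration marshals as a
// human-readable string rather than as nanoseconds.
func exampleDurationJSON() ([]byte, error) {
    d := Duration(90 * time.Second)
    return d.MarshalJSON() // `"1m30s"`
}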
// LookupEnv provides a lookup function for environment variables
func (cd ConfigDetails) LookupEnv(key string) (string, bool) {
v, ok := cd.Environment[key]
return v, ok
}
// Config is a full compose file configuration
type Config struct {
Filename string `yaml:"-" json:"-"`
Version string `json:"version"`
Services Services `json:"services"`
Networks map[string]NetworkConfig `yaml:",omitempty" json:"networks,omitempty"`
Volumes map[string]VolumeConfig `yaml:",omitempty" json:"volumes,omitempty"`
Secrets map[string]SecretConfig `yaml:",omitempty" json:"secrets,omitempty"`
Configs map[string]ConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
Extras map[string]any `yaml:",inline" json:"-"`
}
// MarshalJSON makes Config implement json.Marshaler
func (c Config) MarshalJSON() ([]byte, error) {
m := map[string]any{
"version": c.Version,
"services": c.Services,
}
if len(c.Networks) > 0 {
m["networks"] = c.Networks
}
if len(c.Volumes) > 0 {
m["volumes"] = c.Volumes
}
if len(c.Secrets) > 0 {
m["secrets"] = c.Secrets
}
if len(c.Configs) > 0 {
m["configs"] = c.Configs
}
for k, v := range c.Extras {
m[k] = v
}
return json.Marshal(m)
}
// Services is a list of ServiceConfig
type Services []ServiceConfig
// MarshalYAML makes Services implement yaml.Marshaller
func (s Services) MarshalYAML() (any, error) {
services := map[string]ServiceConfig{}
for _, service := range s {
services[service.Name] = service
}
return services, nil
}
// MarshalJSON makes Services implement json.Marshaler
func (s Services) MarshalJSON() ([]byte, error) {
data, err := s.MarshalYAML()
if err != nil {
return nil, err
}
return json.MarshalIndent(data, "", " ")
}
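// Illustrative sketch (not part of the upstream file): Services marshals as a
// mapping keyed by service name; the service below is an assumption.
func exampleServicesYAML() (any, error) {
    s := Services{{Name: "web", Image: "nginx:alpine"}}
    return s.MarshalYAML() // map["web"] -> the "web" ServiceConfig
}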
// ServiceConfig is the configuration of one service
type ServiceConfig struct {
Name string `yaml:"-" json:"-"`
Build BuildConfig `yaml:",omitempty" json:"build,omitempty"`
CapAdd []string `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"`
CapDrop []string `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"`
CgroupNSMode string `mapstructure:"cgroupns_mode" yaml:"cgroupns_mode,omitempty" json:"cgroupns_mode,omitempty"`
CgroupParent string `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"`
Command ShellCommand `yaml:",omitempty" json:"command,omitempty"`
Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
ContainerName string `mapstructure:"container_name" yaml:"container_name,omitempty" json:"container_name,omitempty"`
CredentialSpec CredentialSpecConfig `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"`
DependsOn []string `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"`
Deploy DeployConfig `yaml:",omitempty" json:"deploy,omitempty"`
Devices []string `yaml:",omitempty" json:"devices,omitempty"`
DNS StringList `yaml:",omitempty" json:"dns,omitempty"`
DNSSearch StringList `mapstructure:"dns_search" yaml:"dns_search,omitempty" json:"dns_search,omitempty"`
DomainName string `mapstructure:"domainname" yaml:"domainname,omitempty" json:"domainname,omitempty"`
Entrypoint ShellCommand `yaml:",omitempty" json:"entrypoint,omitempty"`
Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"`
EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"`
Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"`
ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"`
ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
Hostname string `yaml:",omitempty" json:"hostname,omitempty"`
HealthCheck *HealthCheckConfig `yaml:",omitempty" json:"healthcheck,omitempty"`
Image string `yaml:",omitempty" json:"image,omitempty"`
Init *bool `yaml:",omitempty" json:"init,omitempty"`
Ipc string `yaml:",omitempty" json:"ipc,omitempty"`
Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty" json:"isolation,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
Links []string `yaml:",omitempty" json:"links,omitempty"`
Logging *LoggingConfig `yaml:",omitempty" json:"logging,omitempty"`
MacAddress string `mapstructure:"mac_address" yaml:"mac_address,omitempty" json:"mac_address,omitempty"`
NetworkMode string `mapstructure:"network_mode" yaml:"network_mode,omitempty" json:"network_mode,omitempty"`
Networks map[string]*ServiceNetworkConfig `yaml:",omitempty" json:"networks,omitempty"`
Pid string `yaml:",omitempty" json:"pid,omitempty"`
Ports []ServicePortConfig `yaml:",omitempty" json:"ports,omitempty"`
Privileged bool `yaml:",omitempty" json:"privileged,omitempty"`
ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"`
Restart string `yaml:",omitempty" json:"restart,omitempty"`
Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"`
SecurityOpt []string `mapstructure:"security_opt" yaml:"security_opt,omitempty" json:"security_opt,omitempty"`
ShmSize string `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"`
StdinOpen bool `mapstructure:"stdin_open" yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"`
StopGracePeriod *Duration `mapstructure:"stop_grace_period" yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"`
StopSignal string `mapstructure:"stop_signal" yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"`
Sysctls Mapping `yaml:",omitempty" json:"sysctls,omitempty"`
Tmpfs StringList `yaml:",omitempty" json:"tmpfs,omitempty"`
Tty bool `mapstructure:"tty" yaml:"tty,omitempty" json:"tty,omitempty"`
Ulimits map[string]*UlimitsConfig `yaml:",omitempty" json:"ulimits,omitempty"`
User string `yaml:",omitempty" json:"user,omitempty"`
UserNSMode string `mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"`
Volumes []ServiceVolumeConfig `yaml:",omitempty" json:"volumes,omitempty"`
WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"`
Extras map[string]any `yaml:",inline" json:"-"`
}
// BuildConfig is a type for build
// using the same format as libcompose: https://github.com/docker/libcompose/blob/master/yaml/build.go#L12
type BuildConfig struct {
Context string `yaml:",omitempty" json:"context,omitempty"`
Dockerfile string `yaml:",omitempty" json:"dockerfile,omitempty"`
Args MappingWithEquals `yaml:",omitempty" json:"args,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
CacheFrom StringList `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"`
ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
Network string `yaml:",omitempty" json:"network,omitempty"`
Target string `yaml:",omitempty" json:"target,omitempty"`
}
// ShellCommand is a string or list of string args
type ShellCommand []string
// StringList is a type for fields that can be a string or list of strings
type StringList []string
// StringOrNumberList is a type for fields that can be a list of strings or
// numbers
type StringOrNumberList []string
// MappingWithEquals is a mapping type that can be converted from a list of
// key[=value] strings.
// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`.
// For the key without value (`key`), the mapped value is set to nil.
type MappingWithEquals map[string]*string
// Mapping is a mapping type that can be converted from a list of
// key[=value] strings.
// For the key with an empty value (`key=`), or key without value (`key`), the
// mapped value is set to an empty string `""`.
type Mapping map[string]string
// Labels is a mapping type for labels
type Labels map[string]string
// MappingWithColon is a mapping type that can be converted from a list of
// 'key: value' strings
type MappingWithColon map[string]string
// HostsList is a list of colon-separated host-ip mappings
type HostsList []string
// LoggingConfig the logging configuration for a service
type LoggingConfig struct {
Driver string `yaml:",omitempty" json:"driver,omitempty"`
Options map[string]string `yaml:",omitempty" json:"options,omitempty"`
}
// DeployConfig the deployment configuration for a service
type DeployConfig struct {
Mode string `yaml:",omitempty" json:"mode,omitempty"`
Replicas *uint64 `yaml:",omitempty" json:"replicas,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty" json:"update_config,omitempty"`
RollbackConfig *UpdateConfig `mapstructure:"rollback_config" yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"`
Resources Resources `yaml:",omitempty" json:"resources,omitempty"`
RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"`
Placement Placement `yaml:",omitempty" json:"placement,omitempty"`
EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"`
}
// HealthCheckConfig the healthcheck configuration for a service
type HealthCheckConfig struct {
Test HealthCheckTest `yaml:",omitempty" json:"test,omitempty"`
Timeout *Duration `yaml:",omitempty" json:"timeout,omitempty"`
Interval *Duration `yaml:",omitempty" json:"interval,omitempty"`
Retries *uint64 `yaml:",omitempty" json:"retries,omitempty"`
StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"`
StartInterval *Duration `mapstructure:"start_interval" yaml:"start_interval,omitempty" json:"start_interval,omitempty"`
Disable bool `yaml:",omitempty" json:"disable,omitempty"`
}
// HealthCheckTest is the command run to test the health of a service
type HealthCheckTest []string
// UpdateConfig the service update configuration
type UpdateConfig struct {
Parallelism *uint64 `yaml:",omitempty" json:"parallelism,omitempty"`
Delay Duration `yaml:",omitempty" json:"delay,omitempty"`
FailureAction string `mapstructure:"failure_action" yaml:"failure_action,omitempty" json:"failure_action,omitempty"`
Monitor Duration `yaml:",omitempty" json:"monitor,omitempty"`
MaxFailureRatio float32 `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"`
Order string `yaml:",omitempty" json:"order,omitempty"`
}
// Resources the resource limits and reservations
type Resources struct {
Limits *ResourceLimit `yaml:",omitempty" json:"limits,omitempty"`
Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"`
}
// ResourceLimit is a resource to be limited
type ResourceLimit struct {
// TODO: types to convert from units and ratios
NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`
MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"`
Pids int64 `mapstructure:"pids" yaml:"pids,omitempty" json:"pids,omitempty"`
}
// Resource is a resource to be reserved
type Resource struct {
// TODO: types to convert from units and ratios
NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`
MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"`
GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"`
}
// GenericResource represents a "user defined" resource which can
// only be an integer (e.g: SSD=3) for a service
type GenericResource struct {
DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"`
}
// DiscreteGenericResource represents a "user defined" resource which is defined
// as an integer
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to count the resource (SSD=5, HDD=3, ...)
type DiscreteGenericResource struct {
Kind string `json:"kind"`
Value int64 `json:"value"`
}
// UnitBytes is the bytes type
type UnitBytes int64
// MarshalYAML makes UnitBytes implement yaml.Marshaller
func (u UnitBytes) MarshalYAML() (any, error) {
return fmt.Sprintf("%d", u), nil
}
// MarshalJSON makes UnitBytes implement json.Marshaler
func (u UnitBytes) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%d"`, u)), nil
}
// RestartPolicy the service restart policy
type RestartPolicy struct {
Condition string `yaml:",omitempty" json:"condition,omitempty"`
Delay *Duration `yaml:",omitempty" json:"delay,omitempty"`
MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"`
Window *Duration `yaml:",omitempty" json:"window,omitempty"`
}
// Placement constraints for the service
type Placement struct {
Constraints []string `yaml:",omitempty" json:"constraints,omitempty"`
Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"`
MaxReplicas uint64 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"`
}
// PlacementPreferences is the preferences for a service placement
type PlacementPreferences struct {
Spread string `yaml:",omitempty" json:"spread,omitempty"`
}
// ServiceNetworkConfig is the network configuration for a service
type ServiceNetworkConfig struct {
Aliases []string `yaml:",omitempty" json:"aliases,omitempty"`
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
Ipv4Address string `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"`
Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"`
}
// ServicePortConfig is the port configuration for a service
type ServicePortConfig struct {
Mode string `yaml:",omitempty" json:"mode,omitempty"`
Target uint32 `yaml:",omitempty" json:"target,omitempty"`
Published uint32 `yaml:",omitempty" json:"published,omitempty"`
Protocol string `yaml:",omitempty" json:"protocol,omitempty"`
}
// ServiceVolumeConfig are references to a volume used by a service
type ServiceVolumeConfig struct {
Type string `yaml:",omitempty" json:"type,omitempty"`
Source string `yaml:",omitempty" json:"source,omitempty"`
Target string `yaml:",omitempty" json:"target,omitempty"`
ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"`
Consistency string `yaml:",omitempty" json:"consistency,omitempty"`
Bind *ServiceVolumeBind `yaml:",omitempty" json:"bind,omitempty"`
Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"`
Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"`
Cluster *ServiceVolumeCluster `yaml:",omitempty" json:"cluster,omitempty"`
}
// ServiceVolumeBind are options for a service volume of type bind
type ServiceVolumeBind struct {
Propagation string `yaml:",omitempty" json:"propagation,omitempty"`
}
// ServiceVolumeVolume are options for a service volume of type volume
type ServiceVolumeVolume struct {
NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"`
}
// ServiceVolumeTmpfs are options for a service volume of type tmpfs
type ServiceVolumeTmpfs struct {
Size int64 `yaml:",omitempty" json:"size,omitempty"`
}
// ServiceVolumeCluster are options for a service volume of type cluster.
// Deliberately left blank for future options, but unused now.
type ServiceVolumeCluster struct{}
// FileReferenceConfig for a reference to a swarm file object
type FileReferenceConfig struct {
Source string `yaml:",omitempty" json:"source,omitempty"`
Target string `yaml:",omitempty" json:"target,omitempty"`
UID string `yaml:",omitempty" json:"uid,omitempty"`
GID string `yaml:",omitempty" json:"gid,omitempty"`
Mode *uint32 `yaml:",omitempty" json:"mode,omitempty"`
}
// ServiceConfigObjConfig is the config obj configuration for a service
type ServiceConfigObjConfig FileReferenceConfig
// ServiceSecretConfig is the secret configuration for a service
type ServiceSecretConfig FileReferenceConfig
// UlimitsConfig the ulimit configuration
type UlimitsConfig struct {
Single int `yaml:",omitempty" json:"single,omitempty"`
Soft int `yaml:",omitempty" json:"soft,omitempty"`
Hard int `yaml:",omitempty" json:"hard,omitempty"`
}
// MarshalYAML makes UlimitsConfig implement yaml.Marshaller
func (u *UlimitsConfig) MarshalYAML() (any, error) {
if u.Single != 0 {
return u.Single, nil
}
// Return as a value to avoid re-entering this method and use the default implementation
return *u, nil
}
// MarshalJSON makes UlimitsConfig implement json.Marshaller
func (u *UlimitsConfig) MarshalJSON() ([]byte, error) {
if u.Single != 0 {
return json.Marshal(u.Single)
}
// Pass as a value to avoid re-entering this method and use the default implementation
return json.Marshal(*u)
}
// NetworkConfig for a network
type NetworkConfig struct {
Name string `yaml:",omitempty" json:"name,omitempty"`
Driver string `yaml:",omitempty" json:"driver,omitempty"`
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"`
External External `yaml:",omitempty" json:"external,omitempty"`
Internal bool `yaml:",omitempty" json:"internal,omitempty"`
Attachable bool `yaml:",omitempty" json:"attachable,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
Extras map[string]any `yaml:",inline" json:"-"`
}
// IPAMConfig for a network
type IPAMConfig struct {
Driver string `yaml:",omitempty" json:"driver,omitempty"`
Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"`
}
// IPAMPool for a network
type IPAMPool struct {
Subnet string `yaml:",omitempty" json:"subnet,omitempty"`
}
// VolumeConfig for a volume
type VolumeConfig struct {
Name string `yaml:",omitempty" json:"name,omitempty"`
Driver string `yaml:",omitempty" json:"driver,omitempty"`
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
External External `yaml:",omitempty" json:"external,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
Extras map[string]any `yaml:",inline" json:"-"`
Spec *ClusterVolumeSpec `mapstructure:"x-cluster-spec" yaml:"x-cluster-spec,omitempty" json:"x-cluster-spec,omitempty"`
}
// ClusterVolumeSpec defines all the configuration and options specific to a
// cluster (CSI) volume.
type ClusterVolumeSpec struct {
Group string `yaml:",omitempty" json:"group,omitempty"`
AccessMode *AccessMode `mapstructure:"access_mode" yaml:"access_mode,omitempty" json:"access_mode,omitempty"`
AccessibilityRequirements *TopologyRequirement `mapstructure:"accessibility_requirements" yaml:"accessibility_requirements,omitempty" json:"accessibility_requirements,omitempty"`
CapacityRange *CapacityRange `mapstructure:"capacity_range" yaml:"capacity_range,omitempty" json:"capacity_range,omitempty"`
Secrets []VolumeSecret `yaml:",omitempty" json:"secrets,omitempty"`
Availability string `yaml:",omitempty" json:"availability,omitempty"`
}
// AccessMode defines the way a cluster volume is accessed by the tasks
type AccessMode struct {
Scope string `yaml:",omitempty" json:"scope,omitempty"`
Sharing string `yaml:",omitempty" json:"sharing,omitempty"`
MountVolume *MountVolume `mapstructure:"mount_volume" yaml:"mount_volume,omitempty" json:"mount_volume,omitempty"`
BlockVolume *BlockVolume `mapstructure:"block_volume" yaml:"block_volume,omitempty" json:"block_volume,omitempty"`
}
// MountVolume defines options for using a volume as a Mount
type MountVolume struct {
FsType string `mapstructure:"fs_type" yaml:"fs_type,omitempty" json:"fs_type,omitempty"`
MountFlags []string `mapstructure:"mount_flags" yaml:"mount_flags,omitempty" json:"mount_flags,omitempty"`
}
// BlockVolume is deliberately empty
type BlockVolume struct{}
// TopologyRequirement defines the requirements for volume placement in the
// cluster.
type TopologyRequirement struct {
Requisite []Topology `yaml:",omitempty" json:"requisite,omitempty"`
Preferred []Topology `yaml:",omitempty" json:"preferred,omitempty"`
}
// Topology defines a particular topology group
type Topology struct {
Segments Mapping `yaml:",omitempty" json:"segments,omitempty"`
}
// CapacityRange defines the minimum and maximum size of a volume.
type CapacityRange struct {
RequiredBytes UnitBytes `mapstructure:"required_bytes" yaml:"required_bytes,omitempty" json:"required_bytes,omitempty"`
LimitBytes UnitBytes `mapstructure:"limit_bytes" yaml:"limit_bytes,omitempty" json:"limit_bytes,omitempty"`
}
// VolumeSecret defines a secret that needs to be passed to the CSI plugin when
// using the volume.
type VolumeSecret struct {
Key string `yaml:",omitempty" json:"key,omitempty"`
Secret string `yaml:",omitempty" json:"secret,omitempty"`
}
// External identifies a Volume or Network as a reference to a resource that is
// not managed, and should already exist.
// External.name is deprecated and replaced by Volume.name
type External struct {
Name string `yaml:",omitempty" json:"name,omitempty"`
External bool `yaml:",omitempty" json:"external,omitempty"`
}
// MarshalYAML makes External implement yaml.Marshaller
func (e External) MarshalYAML() (any, error) {
if e.Name == "" {
return e.External, nil
}
return External{Name: e.Name}, nil
}
// MarshalJSON makes External implement json.Marshaller
func (e External) MarshalJSON() ([]byte, error) {
if e.Name == "" {
return []byte(strconv.FormatBool(e.External)), nil
}
return []byte(fmt.Sprintf(`{"name": %q}`, e.Name)), nil
}
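// Illustrative sketch (not part of the upstream file): External marshals
// either as a bare boolean or as an object carrying the (deprecated) name.
// "shared-net" is an illustrative name only.
func exampleExternalJSON() ([]byte, []byte, error) {
    plain, err := External{External: true}.MarshalJSON() // true
    if err != nil {
        return nil, nil, err
    }
    named, err := External{Name: "shared-net"}.MarshalJSON() // {"name": "shared-net"}
    return plain, named, err
}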
// CredentialSpecConfig for credential spec on Windows
type CredentialSpecConfig struct {
Config string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40
File string `yaml:",omitempty" json:"file,omitempty"`
Registry string `yaml:",omitempty" json:"registry,omitempty"`
}
// FileObjectConfig is a config type for a file used by a service
type FileObjectConfig struct {
Name string `yaml:",omitempty" json:"name,omitempty"`
File string `yaml:",omitempty" json:"file,omitempty"`
External External `yaml:",omitempty" json:"external,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
Extras map[string]any `yaml:",inline" json:"-"`
Driver string `yaml:",omitempty" json:"driver,omitempty"`
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"`
}
// SecretConfig for a secret
type SecretConfig FileObjectConfig
// ConfigObjConfig is the config for the swarm "Config" object
type ConfigObjConfig FileObjectConfig

172
vendor/github.com/docker/cli/cli/config/config.go generated vendored Normal file
View File

@ -0,0 +1,172 @@
package config
import (
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"sync"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
"github.com/docker/cli/cli/config/types"
"github.com/pkg/errors"
)
const (
// EnvOverrideConfigDir is the name of the environment variable that can be
// used to override the location of the client configuration files (~/.docker).
//
// It takes priority over the default, but can be overridden by the "--config"
// command line option.
EnvOverrideConfigDir = "DOCKER_CONFIG"
// ConfigFileName is the name of the client configuration file inside the
// config-directory.
ConfigFileName = "config.json"
configFileDir = ".docker"
contextsDir = "contexts"
)
var (
initConfigDir = new(sync.Once)
configDir string
)
// resetConfigDir is used in testing to reset the "configDir" package variable
// and its sync.Once to force re-lookup between tests.
func resetConfigDir() {
configDir = ""
initConfigDir = new(sync.Once)
}
// getHomeDir returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
//
// On non-Windows platforms, it falls back to nss lookups, if the home
// directory cannot be obtained from environment-variables.
//
// If linking statically with cgo enabled against glibc, ensure the
// osusergo build tag is used.
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
//
// getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker
// as dependency for consumers that only need to read the config-file.
//
// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get
func getHomeDir() string {
home, _ := os.UserHomeDir()
if home == "" && runtime.GOOS != "windows" {
if u, err := user.Current(); err == nil {
return u.HomeDir
}
}
return home
}
// Dir returns the directory the configuration file is stored in
func Dir() string {
initConfigDir.Do(func() {
configDir = os.Getenv(EnvOverrideConfigDir)
if configDir == "" {
configDir = filepath.Join(getHomeDir(), configFileDir)
}
})
return configDir
}
// ContextStoreDir returns the directory the docker contexts are stored in
func ContextStoreDir() string {
return filepath.Join(Dir(), contextsDir)
}
// SetDir sets the directory the configuration file is stored in
func SetDir(dir string) {
// trigger the sync.Once to synchronise with Dir()
initConfigDir.Do(func() {})
configDir = filepath.Clean(dir)
}
// Path returns the path to a file relative to the config dir
func Path(p ...string) (string, error) {
path := filepath.Join(append([]string{Dir()}, p...)...)
if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) {
return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir())
}
return path, nil
}
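// Illustrative sketch (not part of the upstream file): Path joins onto the
// configuration directory and rejects paths that escape it.
func examplePath() (string, error) {
    if _, err := Path("..", "outside"); err == nil {
        // never reached: traversal outside the config directory is rejected
    }
    return Path("contexts", "meta") // e.g. ~/.docker/contexts/meta
}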
// LoadFromReader is a convenience function that creates a ConfigFile object from
// a reader. It returns an error if configData is malformed.
func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
configFile := configfile.ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
}
err := configFile.LoadFromReader(configData)
return &configFile, err
}
// Load reads the configuration file ([ConfigFileName]) from the given directory.
// If no directory is given, it uses the default [Dir]. A [*configfile.ConfigFile]
// is returned containing the contents of the configuration file, or a default
// struct if no configfile exists in the given location.
//
// Load returns an error if a configuration file exists in the given location,
// but cannot be read, or is malformed. Consumers must handle errors to prevent
// overwriting an existing configuration file.
func Load(configDir string) (*configfile.ConfigFile, error) {
if configDir == "" {
configDir = Dir()
}
return load(configDir)
}
func load(configDir string) (*configfile.ConfigFile, error) {
filename := filepath.Join(configDir, ConfigFileName)
configFile := configfile.New(filename)
file, err := os.Open(filename)
if err != nil {
if os.IsNotExist(err) {
// It is OK for no configuration file to be present, in which
// case we return a default struct.
return configFile, nil
}
// Any other error happening when failing to read the file must be returned.
return configFile, errors.Wrap(err, "loading config file")
}
defer file.Close()
err = configFile.LoadFromReader(file)
if err != nil {
err = errors.Wrapf(err, "loading config file: %s: ", filename)
}
return configFile, err
}
// LoadDefaultConfigFile attempts to load the default config file and returns
// a reference to the ConfigFile struct. If none is found or when failing to load
// the configuration file, it initializes a default ConfigFile struct. If no
// credentials-store is set in the configuration file, it attempts to discover
// the default store to use for the current platform.
//
// Important: LoadDefaultConfigFile prints a warning to stderr when failing to
// load the configuration file, but otherwise ignores errors. Consumers should
// consider using [Load] (and [credentials.DetectDefaultStore]) to detect errors
// when updating the configuration file, to prevent discarding a (malformed)
// configuration file.
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
configFile, err := load(Dir())
if err != nil {
// FIXME(thaJeztah): we should not proceed here to prevent overwriting existing (but malformed) config files; see https://github.com/docker/cli/issues/5075
_, _ = fmt.Fprintln(stderr, "WARNING: Error", err)
}
if !configFile.ContainsAuth() {
configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
}
return configFile
}
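// Illustrative sketch (not part of the upstream file): load the configuration
// from the default directory and resolve credentials for a registry host.
// "registry.example.com" is an illustrative hostname only.
func exampleLoadAndResolve() error {
    cfg, err := Load("") // "" falls back to Dir()
    if err != nil {
        return err
    }
    _, err = cfg.GetAuthConfig("registry.example.com")
    return err
}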

View File

@ -0,0 +1,353 @@
package configfile
import (
"encoding/base64"
"encoding/json"
"io"
"os"
"path/filepath"
"strings"
"github.com/docker/cli/cli/config/credentials"
"github.com/docker/cli/cli/config/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ConfigFile ~/.docker/config.json file info
type ConfigFile struct {
AuthConfigs map[string]types.AuthConfig `json:"auths"`
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
PsFormat string `json:"psFormat,omitempty"`
ImagesFormat string `json:"imagesFormat,omitempty"`
NetworksFormat string `json:"networksFormat,omitempty"`
PluginsFormat string `json:"pluginsFormat,omitempty"`
VolumesFormat string `json:"volumesFormat,omitempty"`
StatsFormat string `json:"statsFormat,omitempty"`
DetachKeys string `json:"detachKeys,omitempty"`
CredentialsStore string `json:"credsStore,omitempty"`
CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
Filename string `json:"-"` // Note: for internal use only
ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
ServicesFormat string `json:"servicesFormat,omitempty"`
TasksFormat string `json:"tasksFormat,omitempty"`
SecretFormat string `json:"secretFormat,omitempty"`
ConfigFormat string `json:"configFormat,omitempty"`
NodesFormat string `json:"nodesFormat,omitempty"`
PruneFilters []string `json:"pruneFilters,omitempty"`
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
Experimental string `json:"experimental,omitempty"`
CurrentContext string `json:"currentContext,omitempty"`
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
Plugins map[string]map[string]string `json:"plugins,omitempty"`
Aliases map[string]string `json:"aliases,omitempty"`
Features map[string]string `json:"features,omitempty"`
}
// ProxyConfig contains proxy configuration settings
type ProxyConfig struct {
HTTPProxy string `json:"httpProxy,omitempty"`
HTTPSProxy string `json:"httpsProxy,omitempty"`
NoProxy string `json:"noProxy,omitempty"`
FTPProxy string `json:"ftpProxy,omitempty"`
AllProxy string `json:"allProxy,omitempty"`
}
// New initializes an empty configuration file for the given filename 'fn'
func New(fn string) *ConfigFile {
return &ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
HTTPHeaders: make(map[string]string),
Filename: fn,
Plugins: make(map[string]map[string]string),
Aliases: make(map[string]string),
}
}
// LoadFromReader reads the configuration data from the given reader, decodes
// the stored auth configurations, and populates the receiver object.
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) {
return err
}
var err error
for addr, ac := range configFile.AuthConfigs {
if ac.Auth != "" {
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
if err != nil {
return err
}
}
ac.Auth = ""
ac.ServerAddress = addr
configFile.AuthConfigs[addr] = ac
}
return nil
}
// ContainsAuth returns whether there is authentication configured
// in this file or not.
func (configFile *ConfigFile) ContainsAuth() bool {
return configFile.CredentialsStore != "" ||
len(configFile.CredentialHelpers) > 0 ||
len(configFile.AuthConfigs) > 0
}
// GetAuthConfigs returns the mapping of repo to auth configuration
func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig {
if configFile.AuthConfigs == nil {
configFile.AuthConfigs = make(map[string]types.AuthConfig)
}
return configFile.AuthConfigs
}
// SaveToWriter encodes and writes out all the authorization information to
// the given writer
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
// Encode sensitive data into a new/temp struct
tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
for k, authConfig := range configFile.AuthConfigs {
authCopy := authConfig
// encode and save the authstring, while blanking out the original fields
authCopy.Auth = encodeAuth(&authCopy)
authCopy.Username = ""
authCopy.Password = ""
authCopy.ServerAddress = ""
tmpAuthConfigs[k] = authCopy
}
saveAuthConfigs := configFile.AuthConfigs
configFile.AuthConfigs = tmpAuthConfigs
defer func() { configFile.AuthConfigs = saveAuthConfigs }()
// User-Agent header is automatically set, and should not be stored in the configuration
for v := range configFile.HTTPHeaders {
if strings.EqualFold(v, "User-Agent") {
delete(configFile.HTTPHeaders, v)
}
}
data, err := json.MarshalIndent(configFile, "", "\t")
if err != nil {
return err
}
_, err = writer.Write(data)
return err
}
// Save encodes and writes out all the authorization information
func (configFile *ConfigFile) Save() (retErr error) {
if configFile.Filename == "" {
return errors.Errorf("Can't save config with empty filename")
}
dir := filepath.Dir(configFile.Filename)
if err := os.MkdirAll(dir, 0o700); err != nil {
return err
}
temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename))
if err != nil {
return err
}
defer func() {
temp.Close()
if retErr != nil {
if err := os.Remove(temp.Name()); err != nil {
logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
}
}
}()
err = configFile.SaveToWriter(temp)
if err != nil {
return err
}
if err := temp.Close(); err != nil {
return errors.Wrap(err, "error closing temp file")
}
// Handle situation where the configfile is a symlink
cfgFile := configFile.Filename
if f, err := os.Readlink(cfgFile); err == nil {
cfgFile = f
}
// Try copying the current config file (if any) ownership and permissions
copyFilePermissions(cfgFile, temp.Name())
return os.Rename(temp.Name(), cfgFile)
}
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
// then checking this against any environment variables provided to the container
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
var cfgKey string
if _, ok := configFile.Proxies[host]; !ok {
cfgKey = "default"
} else {
cfgKey = host
}
config := configFile.Proxies[cfgKey]
permitted := map[string]*string{
"HTTP_PROXY": &config.HTTPProxy,
"HTTPS_PROXY": &config.HTTPSProxy,
"NO_PROXY": &config.NoProxy,
"FTP_PROXY": &config.FTPProxy,
"ALL_PROXY": &config.AllProxy,
}
m := runOpts
if m == nil {
m = make(map[string]*string)
}
for k := range permitted {
if *permitted[k] == "" {
continue
}
if _, ok := m[k]; !ok {
m[k] = permitted[k]
}
if _, ok := m[strings.ToLower(k)]; !ok {
m[strings.ToLower(k)] = permitted[k]
}
}
return m
}
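// Illustrative sketch (not part of the upstream file): resolve proxy settings
// for a daemon host, letting explicit run options win over config.json. The
// host and proxy URL below are assumptions.
func exampleParseProxyConfig(configFile *ConfigFile) map[string]*string {
    httpProxy := "http://proxy.internal:3128"
    return configFile.ParseProxyConfig("tcp://myhost:2376", map[string]*string{
        "HTTP_PROXY": &httpProxy,
    })
}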
// encodeAuth creates a base64 encoded string containing authorization information
func encodeAuth(authConfig *types.AuthConfig) string {
if authConfig.Username == "" && authConfig.Password == "" {
return ""
}
authStr := authConfig.Username + ":" + authConfig.Password
msg := []byte(authStr)
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
base64.StdEncoding.Encode(encoded, msg)
return string(encoded)
}
// decodeAuth decodes a base64 encoded string and returns username and password
func decodeAuth(authStr string) (string, string, error) {
if authStr == "" {
return "", "", nil
}
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return "", "", err
}
if n > decLen {
return "", "", errors.Errorf("Something went wrong decoding auth config")
}
userName, password, ok := strings.Cut(string(decoded), ":")
if !ok || userName == "" {
return "", "", errors.Errorf("Invalid auth configuration file")
}
return userName, strings.Trim(password, "\x00"), nil
}
// GetCredentialsStore returns a new credentials store from the settings in the
// configuration file
func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store {
if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" {
return newNativeStore(configFile, helper)
}
return credentials.NewFileStore(configFile)
}
// var for unit testing.
var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store {
return credentials.NewNativeStore(configFile, helperSuffix)
}
// GetAuthConfig for a repository from the credential store
func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) {
return configFile.GetCredentialsStore(registryHostname).Get(registryHostname)
}
// getConfiguredCredentialStore returns the credential helper configured for the
// given registry, the default credsStore, or the empty string if neither are
// configured.
func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string {
if c.CredentialHelpers != nil && registryHostname != "" {
if helper, exists := c.CredentialHelpers[registryHostname]; exists {
return helper
}
}
return c.CredentialsStore
}
// GetAllCredentials returns all of the credentials stored in all of the
// configured credential stores.
func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) {
auths := make(map[string]types.AuthConfig)
addAll := func(from map[string]types.AuthConfig) {
for reg, ac := range from {
auths[reg] = ac
}
}
defaultStore := configFile.GetCredentialsStore("")
newAuths, err := defaultStore.GetAll()
if err != nil {
return nil, err
}
addAll(newAuths)
// Auth configs from a registry-specific helper should override those from the default store.
for registryHostname := range configFile.CredentialHelpers {
newAuth, err := configFile.GetAuthConfig(registryHostname)
if err != nil {
logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname)
continue
}
auths[registryHostname] = newAuth
}
return auths, nil
}
// GetFilename returns the file name that this config file is based on.
func (configFile *ConfigFile) GetFilename() string {
return configFile.Filename
}
// PluginConfig retrieves the requested option for the given plugin.
func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) {
if configFile.Plugins == nil {
return "", false
}
pluginConfig, ok := configFile.Plugins[pluginname]
if !ok {
return "", false
}
value, ok := pluginConfig[option]
return value, ok
}
// SetPluginConfig sets the option to the given value for the given
// plugin. Passing a value of "" will remove the option. If removing
// the final config item for a given plugin then also cleans up the
// overall plugin entry.
func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) {
if configFile.Plugins == nil {
configFile.Plugins = make(map[string]map[string]string)
}
pluginConfig, ok := configFile.Plugins[pluginname]
if !ok {
pluginConfig = make(map[string]string)
configFile.Plugins[pluginname] = pluginConfig
}
if value != "" {
pluginConfig[option] = value
} else {
delete(pluginConfig, option)
}
if len(pluginConfig) == 0 {
delete(configFile.Plugins, pluginname)
}
}
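// Illustrative sketch (not part of the upstream file): plugin options are kept
// per plugin name and the plugin entry is dropped once its last option is
// removed. "buildx"/"experimental" are illustrative keys only.
func examplePluginConfig(configFile *ConfigFile) (string, bool) {
    configFile.SetPluginConfig("buildx", "experimental", "enabled")
    return configFile.PluginConfig("buildx", "experimental") // "enabled", true
}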

View File

@ -0,0 +1,35 @@
//go:build !windows
package configfile
import (
"os"
"syscall"
)
// copyFilePermissions copies file ownership and permissions from "src" to "dst",
// ignoring any error during the process.
func copyFilePermissions(src, dst string) {
var (
mode os.FileMode = 0o600
uid, gid int
)
fi, err := os.Stat(src)
if err != nil {
return
}
if fi.Mode().IsRegular() {
mode = fi.Mode()
}
if err := os.Chmod(dst, mode); err != nil {
return
}
uid = int(fi.Sys().(*syscall.Stat_t).Uid)
gid = int(fi.Sys().(*syscall.Stat_t).Gid)
if uid > 0 && gid > 0 {
_ = os.Chown(dst, uid, gid)
}
}

View File

@ -0,0 +1,5 @@
package configfile
func copyFilePermissions(src, dst string) {
// TODO implement for Windows
}

View File

@ -0,0 +1,17 @@
package credentials
import (
"github.com/docker/cli/cli/config/types"
)
// Store is the interface that any credentials store must implement.
type Store interface {
// Erase removes credentials from the store for a given server.
Erase(serverAddress string) error
// Get retrieves credentials from the store for a given server.
Get(serverAddress string) (types.AuthConfig, error)
// GetAll retrieves all the credentials from the store.
GetAll() (map[string]types.AuthConfig, error)
// Store saves credentials in the store.
Store(authConfig types.AuthConfig) error
}

View File

@ -0,0 +1,22 @@
package credentials
import "os/exec"
// DetectDefaultStore returns the default credentials store for the platform if
// no user-defined store is passed and the store's helper executable is available.
func DetectDefaultStore(store string) string {
if store != "" {
// use user-defined
return store
}
platformDefault := defaultCredentialsStore()
if platformDefault == "" {
return ""
}
if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err != nil {
return ""
}
return platformDefault
}

View File

@ -0,0 +1,5 @@
package credentials
func defaultCredentialsStore() string {
return "osxkeychain"
}

View File

@ -0,0 +1,13 @@
package credentials
import (
"os/exec"
)
func defaultCredentialsStore() string {
if _, err := exec.LookPath("pass"); err == nil {
return "pass"
}
return "secretservice"
}

View File

@ -0,0 +1,7 @@
//go:build !windows && !darwin && !linux
package credentials
func defaultCredentialsStore() string {
return ""
}

View File

@ -0,0 +1,5 @@
package credentials
func defaultCredentialsStore() string {
return "wincred"
}

View File

@ -0,0 +1,86 @@
package credentials
import (
"net"
"net/url"
"strings"
"github.com/docker/cli/cli/config/types"
)
type store interface {
Save() error
GetAuthConfigs() map[string]types.AuthConfig
GetFilename() string
}
// fileStore implements a credentials store using
// the docker configuration file to keep the credentials in plain text.
type fileStore struct {
file store
}
// NewFileStore creates a new file credentials store.
func NewFileStore(file store) Store {
return &fileStore{file: file}
}
// Erase removes the given credentials from the file store.
func (c *fileStore) Erase(serverAddress string) error {
delete(c.file.GetAuthConfigs(), serverAddress)
return c.file.Save()
}
// Get retrieves credentials for a specific server from the file store.
func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
authConfig, ok := c.file.GetAuthConfigs()[serverAddress]
if !ok {
// Maybe they have a legacy config file, we will iterate the keys converting
// them to the new format and testing
for r, ac := range c.file.GetAuthConfigs() {
if serverAddress == ConvertToHostname(r) {
return ac, nil
}
}
authConfig = types.AuthConfig{}
}
return authConfig, nil
}
func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
return c.file.GetAuthConfigs(), nil
}
// Store saves the given credentials in the file store.
func (c *fileStore) Store(authConfig types.AuthConfig) error {
authConfigs := c.file.GetAuthConfigs()
authConfigs[authConfig.ServerAddress] = authConfig
return c.file.Save()
}
func (c *fileStore) GetFilename() string {
return c.file.GetFilename()
}
func (c *fileStore) IsFileStore() bool {
return true
}
// ConvertToHostname converts a registry URL with an http|https scheme prefix
// to just a hostname.
// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
func ConvertToHostname(maybeURL string) string {
stripped := maybeURL
if strings.Contains(stripped, "://") {
u, err := url.Parse(stripped)
if err == nil && u.Hostname() != "" {
if u.Port() == "" {
return u.Hostname()
}
return net.JoinHostPort(u.Hostname(), u.Port())
}
}
hostName, _, _ := strings.Cut(stripped, "/")
return hostName
}
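// Illustrative sketch (not part of the upstream file): ConvertToHostname
// strips the scheme and any path, keeping an explicit port.
func exampleConvertToHostname() (string, string) {
    return ConvertToHostname("https://registry.example.com/v2/"),
        ConvertToHostname("https://registry.example.com:5000/v2/")
    // "registry.example.com", "registry.example.com:5000"
}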

View File

@ -0,0 +1,147 @@
package credentials
import (
"github.com/docker/cli/cli/config/types"
"github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
)
const (
remoteCredentialsPrefix = "docker-credential-" //nolint:gosec // ignore G101: Potential hardcoded credentials
tokenUsername = "<token>"
)
// nativeStore implements a credentials store
// using native keychain to keep credentials secure.
// It piggybacks into a file store to keep users' emails.
type nativeStore struct {
programFunc client.ProgramFunc
fileStore Store
}
// NewNativeStore creates a new native store that
// uses a remote helper program to manage credentials.
func NewNativeStore(file store, helperSuffix string) Store {
name := remoteCredentialsPrefix + helperSuffix
return &nativeStore{
programFunc: client.NewShellProgramFunc(name),
fileStore: NewFileStore(file),
}
}
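// Illustrative sketch (not part of the upstream file): a native store shells
// out to the "docker-credential-<suffix>" helper; "osxkeychain" and the server
// address below are assumptions.
func exampleNativeStoreGet(file store) (types.AuthConfig, error) {
    s := NewNativeStore(file, "osxkeychain")
    return s.Get("https://index.docker.io/v1/")
}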
// Erase removes the given credentials from the native store.
func (c *nativeStore) Erase(serverAddress string) error {
if err := client.Erase(c.programFunc, serverAddress); err != nil {
return err
}
// Fallback to plain text store to remove email
return c.fileStore.Erase(serverAddress)
}
// Get retrieves credentials for a specific server from the native store.
func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
// Load the user email if it exists, otherwise start from an empty auth config.
auth, _ := c.fileStore.Get(serverAddress)
creds, err := c.getCredentialsFromStore(serverAddress)
if err != nil {
return auth, err
}
auth.Username = creds.Username
auth.IdentityToken = creds.IdentityToken
auth.Password = creds.Password
auth.ServerAddress = creds.ServerAddress
return auth, nil
}
// GetAll retrieves all the credentials from the native store.
func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
auths, err := c.listCredentialsInStore()
if err != nil {
return nil, err
}
// Emails are only stored in the file store.
// This call can be safely eliminated when emails are removed.
fileConfigs, _ := c.fileStore.GetAll()
authConfigs := make(map[string]types.AuthConfig)
for registry := range auths {
creds, err := c.getCredentialsFromStore(registry)
if err != nil {
return nil, err
}
ac := fileConfigs[registry] // might contain Email
ac.Username = creds.Username
ac.Password = creds.Password
ac.IdentityToken = creds.IdentityToken
if ac.ServerAddress == "" {
ac.ServerAddress = creds.ServerAddress
}
authConfigs[registry] = ac
}
return authConfigs, nil
}
// Store saves the given credentials in the file store.
func (c *nativeStore) Store(authConfig types.AuthConfig) error {
if err := c.storeCredentialsInStore(authConfig); err != nil {
return err
}
authConfig.Username = ""
authConfig.Password = ""
authConfig.IdentityToken = ""
// Fallback to old credential in plain text to save only the email
return c.fileStore.Store(authConfig)
}
// storeCredentialsInStore executes the command to store the credentials in the native store.
func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
creds := &credentials.Credentials{
ServerURL: config.ServerAddress,
Username: config.Username,
Secret: config.Password,
}
if config.IdentityToken != "" {
creds.Username = tokenUsername
creds.Secret = config.IdentityToken
}
return client.Store(c.programFunc, creds)
}
// getCredentialsFromStore executes the command to get the credentials from the native store.
func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
var ret types.AuthConfig
creds, err := client.Get(c.programFunc, serverAddress)
if err != nil {
if credentials.IsErrCredentialsNotFound(err) {
// do not return an error if the credentials are not
// in the keychain. Let docker ask for new credentials.
return ret, nil
}
return ret, err
}
if creds.Username == tokenUsername {
ret.IdentityToken = creds.Secret
} else {
ret.Password = creds.Secret
ret.Username = creds.Username
}
ret.ServerAddress = serverAddress
return ret, nil
}
// listCredentialsInStore returns a listing of stored credentials as a map of
// URL -> username.
func (c *nativeStore) listCredentialsInStore() (map[string]string, error) {
return client.List(c.programFunc)
}
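
For orientation, here is a minimal sketch of how the "<token>" convention above maps onto the docker-credential-helpers client API used by this store. The helper name and server URL are placeholders; any "docker-credential-*" helper on PATH would do.

package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Placeholder helper binary name.
	p := client.NewShellProgramFunc("docker-credential-pass")

	// When an identity token is present, nativeStore stores it as the secret
	// with the reserved "<token>" username (see storeCredentialsInStore above).
	err := client.Store(p, &credentials.Credentials{
		ServerURL: "https://registry.example.com", // placeholder registry
		Username:  "<token>",
		Secret:    "an-identity-token", // placeholder value
	})
	if err != nil {
		fmt.Println("store:", err)
		return
	}
	creds, err := client.Get(p, "https://registry.example.com")
	fmt.Println(creds, err)
}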

View File

@ -0,0 +1,22 @@
package types
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
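
A small sketch of how this struct serializes. The base64 "username:password" encoding of the Auth field is the convention used in Docker config files, not something defined in this file, so treat that part as an assumption.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/docker/cli/cli/config/types"
)

func main() {
	ac := types.AuthConfig{
		Username:      "alice",  // placeholder credentials
		Password:      "s3cret",
		ServerAddress: "https://index.docker.io/v1/",
	}
	// Conventionally, Auth holds base64("username:password") (an assumption here).
	ac.Auth = base64.StdEncoding.EncodeToString([]byte(ac.Username + ":" + ac.Password))

	out, _ := json.Marshal(ac)
	fmt.Println(string(out))
}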

View File

@ -0,0 +1,289 @@
// Package commandconn provides a net.Conn implementation that can be used for
// proxying (or emulating) stream via a custom command.
//
// For example, to provide an http.Client that can connect to a Docker daemon
// running in a Docker container ("DIND"):
//
// httpClient := &http.Client{
// Transport: &http.Transport{
// DialContext: func(ctx context.Context, _network, _addr string) (net.Conn, error) {
// return commandconn.New(ctx, "docker", "exec", "-it", containerID, "docker", "system", "dial-stdio")
// },
// },
// }
package commandconn
import (
"bytes"
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// New returns net.Conn
func New(_ context.Context, cmd string, args ...string) (net.Conn, error) {
var (
c commandConn
err error
)
c.cmd = exec.Command(cmd, args...)
// we assume that args never contains sensitive information
logrus.Debugf("commandconn: starting %s with %v", cmd, args)
c.cmd.Env = os.Environ()
c.cmd.SysProcAttr = &syscall.SysProcAttr{}
setPdeathsig(c.cmd)
createSession(c.cmd)
c.stdin, err = c.cmd.StdinPipe()
if err != nil {
return nil, err
}
c.stdout, err = c.cmd.StdoutPipe()
if err != nil {
return nil, err
}
c.cmd.Stderr = &stderrWriter{
stderrMu: &c.stderrMu,
stderr: &c.stderr,
debugPrefix: fmt.Sprintf("commandconn (%s):", cmd),
}
c.localAddr = dummyAddr{network: "dummy", s: "dummy-0"}
c.remoteAddr = dummyAddr{network: "dummy", s: "dummy-1"}
return &c, c.cmd.Start()
}
// commandConn implements net.Conn
type commandConn struct {
cmdMutex sync.Mutex // for cmd, cmdWaitErr
cmd *exec.Cmd
cmdWaitErr error
cmdExited atomic.Bool
stdin io.WriteCloser
stdout io.ReadCloser
stderrMu sync.Mutex // for stderr
stderr bytes.Buffer
stdinClosed atomic.Bool
stdoutClosed atomic.Bool
closing atomic.Bool
localAddr net.Addr
remoteAddr net.Addr
}
// kill terminates the process. On Windows it kills the process directly,
// whereas on other platforms, a SIGTERM is sent, before forcefully terminating
// the process after 3 seconds.
func (c *commandConn) kill() {
if c.cmdExited.Load() {
return
}
c.cmdMutex.Lock()
var werr error
if runtime.GOOS != "windows" {
werrCh := make(chan error)
go func() { werrCh <- c.cmd.Wait() }()
_ = c.cmd.Process.Signal(syscall.SIGTERM)
select {
case werr = <-werrCh:
case <-time.After(3 * time.Second):
_ = c.cmd.Process.Kill()
werr = <-werrCh
}
} else {
_ = c.cmd.Process.Kill()
werr = c.cmd.Wait()
}
c.cmdWaitErr = werr
c.cmdMutex.Unlock()
c.cmdExited.Store(true)
}
// handleEOF handles io.EOF errors while reading or writing from the underlying
// command pipes.
//
// When we've received an EOF we expect that the command will
// be terminated soon. As such, we call Wait() on the command
// and return EOF or the error depending on whether the command
// exited with an error.
//
// If Wait() does not return within 10s, an error is returned
func (c *commandConn) handleEOF(err error) error {
if err != io.EOF {
return err
}
c.cmdMutex.Lock()
defer c.cmdMutex.Unlock()
var werr error
if c.cmdExited.Load() {
werr = c.cmdWaitErr
} else {
werrCh := make(chan error)
go func() { werrCh <- c.cmd.Wait() }()
select {
case werr = <-werrCh:
c.cmdWaitErr = werr
c.cmdExited.Store(true)
case <-time.After(10 * time.Second):
c.stderrMu.Lock()
stderr := c.stderr.String()
c.stderrMu.Unlock()
return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, err, stderr)
}
}
if werr == nil {
return err
}
c.stderrMu.Lock()
stderr := c.stderr.String()
c.stderrMu.Unlock()
return errors.Errorf("command %v has exited with %v, make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr)
}
func ignorableCloseError(err error) bool {
return strings.Contains(err.Error(), os.ErrClosed.Error())
}
func (c *commandConn) Read(p []byte) (int, error) {
n, err := c.stdout.Read(p)
// check after the call to Read, since
// it is blocking, and while waiting on it
// Close might get called
if c.closing.Load() {
// If we're currently closing the connection
// we don't want to call onEOF
return n, err
}
return n, c.handleEOF(err)
}
func (c *commandConn) Write(p []byte) (int, error) {
n, err := c.stdin.Write(p)
// check after the call to Write, since
// it is blocking, and while waiting on it
// Close might get called
if c.closing.Load() {
// If we're currently closing the connection
// we don't want to call onEOF
return n, err
}
return n, c.handleEOF(err)
}
// CloseRead allows commandConn to implement halfCloser
func (c *commandConn) CloseRead() error {
// NOTE: maybe already closed here
if err := c.stdout.Close(); err != nil && !ignorableCloseError(err) {
return err
}
c.stdoutClosed.Store(true)
if c.stdinClosed.Load() {
c.kill()
}
return nil
}
// CloseWrite allows commandConn to implement halfCloser
func (c *commandConn) CloseWrite() error {
// NOTE: maybe already closed here
if err := c.stdin.Close(); err != nil && !ignorableCloseError(err) {
return err
}
c.stdinClosed.Store(true)
if c.stdoutClosed.Load() {
c.kill()
}
return nil
}
// Close is the net.Conn func that gets called
// by the transport when a dial is cancelled
// due to its context timing out. Any blocked
// Read or Write calls will be unblocked and
// return errors. It will block until the underlying
// command has terminated.
func (c *commandConn) Close() error {
c.closing.Store(true)
defer c.closing.Store(false)
if err := c.CloseRead(); err != nil {
logrus.Warnf("commandConn.Close: CloseRead: %v", err)
return err
}
if err := c.CloseWrite(); err != nil {
logrus.Warnf("commandConn.Close: CloseWrite: %v", err)
return err
}
return nil
}
func (c *commandConn) LocalAddr() net.Addr {
return c.localAddr
}
func (c *commandConn) RemoteAddr() net.Addr {
return c.remoteAddr
}
func (c *commandConn) SetDeadline(t time.Time) error {
logrus.Debugf("unimplemented call: SetDeadline(%v)", t)
return nil
}
func (c *commandConn) SetReadDeadline(t time.Time) error {
logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t)
return nil
}
func (c *commandConn) SetWriteDeadline(t time.Time) error {
logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t)
return nil
}
type dummyAddr struct {
network string
s string
}
func (d dummyAddr) Network() string {
return d.network
}
func (d dummyAddr) String() string {
return d.s
}
type stderrWriter struct {
stderrMu *sync.Mutex
stderr *bytes.Buffer
debugPrefix string
}
func (w *stderrWriter) Write(p []byte) (int, error) {
logrus.Debugf("%s%s", w.debugPrefix, string(p))
w.stderrMu.Lock()
if w.stderr.Len() > 4096 {
w.stderr.Reset()
}
n, err := w.stderr.Write(p)
w.stderrMu.Unlock()
return n, err
}
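
Beyond the DIND example in the package comment, here is a sketch of the way the connhelper package (further below) uses commandconn for ssh:// hosts. The user and host are placeholders, and the remote host is assumed to have Docker 18.09+ providing "docker system dial-stdio".

package main

import (
	"context"
	"fmt"

	"github.com/docker/cli/cli/connhelper/commandconn"
)

func main() {
	// Run "ssh -l me -- host.example.com docker system dial-stdio" and treat
	// its stdin/stdout as a net.Conn (placeholder user/host).
	conn, err := commandconn.New(context.Background(), "ssh",
		"-l", "me", "--", "host.example.com", "docker", "system", "dial-stdio")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Println("local:", conn.LocalAddr(), "remote:", conn.RemoteAddr())
}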

View File

@ -0,0 +1,10 @@
package commandconn
import (
"os/exec"
"syscall"
)
func setPdeathsig(cmd *exec.Cmd) {
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
}

View File

@ -0,0 +1,9 @@
//go:build !linux

package commandconn
import (
"os/exec"
)
func setPdeathsig(*exec.Cmd) {}

View File

@ -0,0 +1,13 @@
//go:build !windows

package commandconn
import (
"os/exec"
)
func createSession(cmd *exec.Cmd) {
// for supporting ssh connection helper with ProxyCommand
// https://github.com/docker/cli/issues/1707
cmd.SysProcAttr.Setsid = true
}

View File

@ -0,0 +1,8 @@
package commandconn
import (
"os/exec"
)
func createSession(cmd *exec.Cmd) {
}

View File

@ -0,0 +1,81 @@
// Package connhelper provides helpers for connecting to a remote daemon host with custom logic.
package connhelper
import (
"context"
"net"
"net/url"
"strings"
"github.com/docker/cli/cli/connhelper/commandconn"
"github.com/docker/cli/cli/connhelper/ssh"
"github.com/pkg/errors"
)
// ConnectionHelper allows connecting to a remote host with a custom stream provider binary.
type ConnectionHelper struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
Host string // dummy URL used for HTTP requests. e.g. "http://docker"
}
// GetConnectionHelper returns a Docker-specific connection helper for the given URL.
// GetConnectionHelper returns nil without error when no helper is registered for the scheme.
//
// An ssh://<user>@<host> URL requires Docker 18.09 or later on the remote host.
func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) {
return getConnectionHelper(daemonURL, nil)
}
// GetConnectionHelperWithSSHOpts returns a Docker-specific connection helper for
// the given URL, and accepts additional options for ssh connections. It returns
// nil without error when no helper is registered for the scheme.
//
// Requires Docker 18.09 or later on the remote host.
func GetConnectionHelperWithSSHOpts(daemonURL string, sshFlags []string) (*ConnectionHelper, error) {
return getConnectionHelper(daemonURL, sshFlags)
}
func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper, error) {
u, err := url.Parse(daemonURL)
if err != nil {
return nil, err
}
if u.Scheme == "ssh" {
sp, err := ssh.ParseURL(daemonURL)
if err != nil {
return nil, errors.Wrap(err, "ssh host connection is not valid")
}
return &ConnectionHelper{
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
args := []string{"docker"}
if sp.Path != "" {
args = append(args, "--host", "unix://"+sp.Path)
}
sshFlags = addSSHTimeout(sshFlags)
args = append(args, "system", "dial-stdio")
return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args(args...)...)...)
},
Host: "http://docker.example.com",
}, nil
}
// A future version may support plugins via ~/.docker/config.json, e.g. "dind".
// See docker/cli#889 for the previous discussion.
return nil, err
}
// GetCommandConnectionHelper returns a Docker-specific connection helper constructed from an arbitrary command.
func GetCommandConnectionHelper(cmd string, flags ...string) (*ConnectionHelper, error) {
return &ConnectionHelper{
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
return commandconn.New(ctx, cmd, flags...)
},
Host: "http://docker.example.com",
}, nil
}
func addSSHTimeout(sshFlags []string) []string {
if !strings.Contains(strings.Join(sshFlags, ""), "ConnectTimeout") {
sshFlags = append(sshFlags, "-o ConnectTimeout=30")
}
return sshFlags
}
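
A sketch of consuming this helper with a plain http.Client. The ssh URL is a placeholder, and /_ping is assumed to be a valid engine API path on the remote side.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/cli/cli/connhelper"
)

func main() {
	helper, err := connhelper.GetConnectionHelper("ssh://me@host.example.com")
	if err != nil {
		fmt.Println(err)
		return
	}
	// helper.Host is a dummy URL; the Dialer decides where traffic really goes.
	httpClient := &http.Client{
		Transport: &http.Transport{DialContext: helper.Dialer},
	}
	resp, err := httpClient.Get(helper.Host + "/_ping")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("ping:", resp.Status)
}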

63
vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
// Package ssh provides the connection helper for ssh:// URL.
package ssh
import (
"net/url"
"github.com/pkg/errors"
)
// ParseURL parses an ssh:// URL into a Spec.
func ParseURL(daemonURL string) (*Spec, error) {
u, err := url.Parse(daemonURL)
if err != nil {
return nil, err
}
if u.Scheme != "ssh" {
return nil, errors.Errorf("expected scheme ssh, got %q", u.Scheme)
}
var sp Spec
if u.User != nil {
sp.User = u.User.Username()
if _, ok := u.User.Password(); ok {
return nil, errors.New("plain-text password is not supported")
}
}
sp.Host = u.Hostname()
if sp.Host == "" {
return nil, errors.Errorf("no host specified")
}
sp.Port = u.Port()
sp.Path = u.Path
if u.RawQuery != "" {
return nil, errors.Errorf("extra query after the host: %q", u.RawQuery)
}
if u.Fragment != "" {
return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment)
}
return &sp, err
}
// Spec of SSH URL
type Spec struct {
User string
Host string
Port string
Path string
}
// Args returns the ssh arguments (excluding the "ssh" command itself), combined with optional additional command args.
func (sp *Spec) Args(add ...string) []string {
var args []string
if sp.User != "" {
args = append(args, "-l", sp.User)
}
if sp.Port != "" {
args = append(args, "-p", sp.Port)
}
args = append(args, "--", sp.Host)
args = append(args, add...)
return args
}
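
A sketch of the parse/args round trip with a placeholder host; the trailing arguments mirror what connhelper passes.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/connhelper/ssh"
)

func main() {
	sp, err := ssh.ParseURL("ssh://me@host.example.com:2222/var/run/docker.sock")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Prints: [-l me -p 2222 -- host.example.com docker system dial-stdio]
	fmt.Println(sp.Args("docker", "system", "dial-stdio"))
}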

View File

@ -0,0 +1,6 @@
package docker
const (
// DockerEndpoint is the name of the docker endpoint in a stored context
DockerEndpoint = "docker"
)

149
vendor/github.com/docker/cli/cli/context/docker/load.go generated vendored Normal file
View File

@ -0,0 +1,149 @@
package docker
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"net"
"net/http"
"time"
"github.com/docker/cli/cli/connhelper"
"github.com/docker/cli/cli/context"
"github.com/docker/cli/cli/context/store"
"github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
)
// EndpointMeta is a typed wrapper around a context-store generic endpoint describing
// a Docker Engine endpoint, without its TLS config.
type EndpointMeta = context.EndpointMetaBase
// Endpoint is a typed wrapper around a context-store generic endpoint describing
// a Docker Engine endpoint, with its TLS data.
type Endpoint struct {
EndpointMeta
TLSData *context.TLSData
}
// WithTLSData loads TLS materials for the endpoint
func WithTLSData(s store.Reader, contextName string, m EndpointMeta) (Endpoint, error) {
tlsData, err := context.LoadTLSData(s, contextName, DockerEndpoint)
if err != nil {
return Endpoint{}, err
}
return Endpoint{
EndpointMeta: m,
TLSData: tlsData,
}, nil
}
// tlsConfig extracts a context docker endpoint TLS config
func (ep *Endpoint) tlsConfig() (*tls.Config, error) {
if ep.TLSData == nil && !ep.SkipTLSVerify {
// there is no specific tls config
return nil, nil
}
var tlsOpts []func(*tls.Config)
if ep.TLSData != nil && ep.TLSData.CA != nil {
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(ep.TLSData.CA) {
return nil, errors.New("failed to retrieve context tls info: ca.pem seems invalid")
}
tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
cfg.RootCAs = certPool
})
}
if ep.TLSData != nil && ep.TLSData.Key != nil && ep.TLSData.Cert != nil {
keyBytes := ep.TLSData.Key
pemBlock, _ := pem.Decode(keyBytes)
if pemBlock == nil {
return nil, errors.New("no valid private key found")
}
if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // SA1019: x509.IsEncryptedPEMBlock is deprecated, and insecure by design
return nil, errors.New("private key is encrypted - support for encrypted private keys has been removed, see https://docs.docker.com/go/deprecated/")
}
x509cert, err := tls.X509KeyPair(ep.TLSData.Cert, keyBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to retrieve context tls info")
}
tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
cfg.Certificates = []tls.Certificate{x509cert}
})
}
if ep.SkipTLSVerify {
tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
cfg.InsecureSkipVerify = true
})
}
return tlsconfig.ClientDefault(tlsOpts...), nil
}
// ClientOpts returns a slice of Client options to configure an API client with this endpoint
func (ep *Endpoint) ClientOpts() ([]client.Opt, error) {
var result []client.Opt
if ep.Host != "" {
helper, err := connhelper.GetConnectionHelper(ep.Host)
if err != nil {
return nil, err
}
if helper == nil {
tlsConfig, err := ep.tlsConfig()
if err != nil {
return nil, err
}
result = append(result,
withHTTPClient(tlsConfig),
client.WithHost(ep.Host),
)
} else {
result = append(result,
client.WithHTTPClient(&http.Client{
// No TLS, and no proxy.
Transport: &http.Transport{
DialContext: helper.Dialer,
},
}),
client.WithHost(helper.Host),
client.WithDialContext(helper.Dialer),
)
}
}
result = append(result, client.WithVersionFromEnv(), client.WithAPIVersionNegotiation())
return result, nil
}
func withHTTPClient(tlsConfig *tls.Config) func(*client.Client) error {
return func(c *client.Client) error {
if tlsConfig == nil {
// Use the default HTTPClient
return nil
}
return client.WithHTTPClient(&http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
DialContext: (&net.Dialer{
KeepAlive: 30 * time.Second,
Timeout: 30 * time.Second,
}).DialContext,
},
CheckRedirect: client.CheckRedirect,
})(c)
}
}
// EndpointFromContext parses a context's docker endpoint metadata into a typed EndpointMeta structure.
func EndpointFromContext(metadata store.Metadata) (EndpointMeta, error) {
ep, ok := metadata.Endpoints[DockerEndpoint]
if !ok {
return EndpointMeta{}, errors.New("cannot find docker endpoint in context")
}
typed, ok := ep.(EndpointMeta)
if !ok {
return EndpointMeta{}, errors.Errorf("endpoint %q is not of type EndpointMeta", DockerEndpoint)
}
return typed, nil
}
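
A sketch of building an API client from a typed endpoint without going through a context store. The TCP host is a placeholder and carries no TLS material, so the resulting client uses a plain connection.

package main

import (
	"context"
	"fmt"

	"github.com/docker/cli/cli/context/docker"
	"github.com/docker/docker/client"
)

func main() {
	// Placeholder daemon address; no TLSData attached.
	ep := docker.Endpoint{
		EndpointMeta: docker.EndpointMeta{Host: "tcp://10.0.0.5:2375"},
	}
	clientOpts, err := ep.ClientOpts()
	if err != nil {
		fmt.Println(err)
		return
	}
	apiClient, err := client.NewClientWithOpts(clientOpts...)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer apiClient.Close()
	ping, err := apiClient.Ping(context.Background())
	fmt.Println(ping.APIVersion, err)
}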

7
vendor/github.com/docker/cli/cli/context/endpoint.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
package context
// EndpointMetaBase contains fields we expect to be common for most context endpoints
type EndpointMetaBase struct {
Host string `json:",omitempty"`
SkipTLSVerify bool
}

32
vendor/github.com/docker/cli/cli/context/store/doc.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Package store provides a generic way to store credentials to connect to
// virtually any kind of remote system.
// The term `context` comes from the similar feature in Kubernetes kubectl
// config files.
//
// Conceptually, a context is a set of metadata and TLS data that can be used
// to connect to various endpoints of a remote system. TLS data and metadata
// are stored separately, so that in the future we will be able to store
// sensitive information in a more secure way, depending on the OS we are running
// on (e.g. on Windows we could use the user Certificate Store, on macOS the
// user Keychain...).
//
// Current implementation is purely file based with the following structure:
//
// ${CONTEXT_ROOT}
// meta/
// <context id>/meta.json: contains context metadata (key/value pairs) as
// well as a list of endpoints (themselves containing
// key/value pair metadata).
// tls/
// <context id>/endpoint1/: directory containing TLS data for the endpoint1
// in the corresponding context.
//
// The context store itself has absolutely no knowledge about what a docker
// endpoint should contain in terms of metadata or TLS config. Client code is
// responsible for generating and parsing endpoint metadata and TLS files. The
// multi-endpoint approach of this package allows combining many different
// endpoints in the same "context".
//
// Context IDs are actually SHA256 hashes of the context name, and are there
// only to avoid dealing with special characters in context names.
package store

View File

@ -0,0 +1,29 @@
package store
import (
"errors"
"io"
)
// LimitedReader is a fork of io.LimitedReader to override Read.
type LimitedReader struct {
R io.Reader
N int64 // max bytes remaining
}
// Read is a fork of io.LimitedReader.Read that returns an error when the limit is exceeded.
func (l *LimitedReader) Read(p []byte) (n int, err error) {
if l.N < 0 {
return 0, errors.New("read exceeds the defined limit")
}
if l.N == 0 {
return 0, io.EOF
}
// have to cap at N + 1, otherwise we won't hit the limit error
if int64(len(p)) > l.N+1 {
p = p[0 : l.N+1]
}
n, err = l.R.Read(p)
l.N -= int64(n)
return n, err
}
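
A quick sketch of the difference from io.LimitedReader: once more than N bytes have been consumed, the next Read fails instead of silently returning EOF.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/cli/cli/context/store"
)

func main() {
	lr := &store.LimitedReader{R: strings.NewReader("0123456789"), N: 4}
	buf, err := io.ReadAll(lr)
	// Prints: "01234" read exceeds the defined limit
	fmt.Printf("%q %v\n", buf, err)
}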

View File

@ -0,0 +1,167 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21

package store
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"reflect"
"sort"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/fvbommel/sortorder"
"github.com/pkg/errors"
)
const (
metadataDir = "meta"
metaFile = "meta.json"
)
type metadataStore struct {
root string
config Config
}
func (s *metadataStore) contextDir(id contextdir) string {
return filepath.Join(s.root, string(id))
}
func (s *metadataStore) createOrUpdate(meta Metadata) error {
contextDir := s.contextDir(contextdirOf(meta.Name))
if err := os.MkdirAll(contextDir, 0o755); err != nil {
return err
}
bytes, err := json.Marshal(&meta)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(contextDir, metaFile), bytes, 0o644)
}
func parseTypedOrMap(payload []byte, getter TypeGetter) (any, error) {
if len(payload) == 0 || string(payload) == "null" {
return nil, nil
}
if getter == nil {
var res map[string]any
if err := json.Unmarshal(payload, &res); err != nil {
return nil, err
}
return res, nil
}
typed := getter()
if err := json.Unmarshal(payload, typed); err != nil {
return nil, err
}
return reflect.ValueOf(typed).Elem().Interface(), nil
}
func (s *metadataStore) get(name string) (Metadata, error) {
m, err := s.getByID(contextdirOf(name))
if err != nil {
return m, errors.Wrapf(err, "context %q", name)
}
return m, nil
}
func (s *metadataStore) getByID(id contextdir) (Metadata, error) {
fileName := filepath.Join(s.contextDir(id), metaFile)
bytes, err := os.ReadFile(fileName)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return Metadata{}, errdefs.NotFound(errors.Wrap(err, "context not found"))
}
return Metadata{}, err
}
var untyped untypedContextMetadata
r := Metadata{
Endpoints: make(map[string]any),
}
if err := json.Unmarshal(bytes, &untyped); err != nil {
return Metadata{}, fmt.Errorf("parsing %s: %v", fileName, err)
}
r.Name = untyped.Name
if r.Metadata, err = parseTypedOrMap(untyped.Metadata, s.config.contextType); err != nil {
return Metadata{}, fmt.Errorf("parsing %s: %v", fileName, err)
}
for k, v := range untyped.Endpoints {
if r.Endpoints[k], err = parseTypedOrMap(v, s.config.endpointTypes[k]); err != nil {
return Metadata{}, fmt.Errorf("parsing %s: %v", fileName, err)
}
}
return r, err
}
func (s *metadataStore) remove(name string) error {
if err := os.RemoveAll(s.contextDir(contextdirOf(name))); err != nil {
return errors.Wrapf(err, "failed to remove metadata")
}
return nil
}
func (s *metadataStore) list() ([]Metadata, error) {
ctxDirs, err := listRecursivelyMetadataDirs(s.root)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, nil
}
return nil, err
}
res := make([]Metadata, 0, len(ctxDirs))
for _, dir := range ctxDirs {
c, err := s.getByID(contextdir(dir))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
continue
}
return nil, errors.Wrap(err, "failed to read metadata")
}
res = append(res, c)
}
sort.Slice(res, func(i, j int) bool {
return sortorder.NaturalLess(res[i].Name, res[j].Name)
})
return res, nil
}
func isContextDir(path string) bool {
s, err := os.Stat(filepath.Join(path, metaFile))
if err != nil {
return false
}
return !s.IsDir()
}
func listRecursivelyMetadataDirs(root string) ([]string, error) {
fis, err := os.ReadDir(root)
if err != nil {
return nil, err
}
var result []string
for _, fi := range fis {
if fi.IsDir() {
if isContextDir(filepath.Join(root, fi.Name())) {
result = append(result, fi.Name())
}
subs, err := listRecursivelyMetadataDirs(filepath.Join(root, fi.Name()))
if err != nil {
return nil, err
}
for _, s := range subs {
result = append(result, filepath.Join(fi.Name(), s))
}
}
}
return result, nil
}
type untypedContextMetadata struct {
Metadata json.RawMessage `json:"metadata,omitempty"`
Endpoints map[string]json.RawMessage `json:"endpoints,omitempty"`
Name string `json:"name,omitempty"`
}

504
vendor/github.com/docker/cli/cli/context/store/store.go generated vendored Normal file
View File

@ -0,0 +1,504 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21

package store
import (
"archive/tar"
"archive/zip"
"bufio"
"bytes"
_ "crypto/sha256" // ensure ids can be computed
"encoding/json"
"io"
"net/http"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/docker/docker/errdefs"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
const restrictedNamePattern = "^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$"
var restrictedNameRegEx = regexp.MustCompile(restrictedNamePattern)
// Store provides a context store for easily remembering endpoint configurations
type Store interface {
Reader
Lister
Writer
StorageInfoProvider
}
// Reader provides read-only (without list) access to context data
type Reader interface {
GetMetadata(name string) (Metadata, error)
ListTLSFiles(name string) (map[string]EndpointFiles, error)
GetTLSData(contextName, endpointName, fileName string) ([]byte, error)
}
// Lister provides listing of contexts
type Lister interface {
List() ([]Metadata, error)
}
// ReaderLister combines Reader and Lister interfaces
type ReaderLister interface {
Reader
Lister
}
// StorageInfoProvider provides more information about storage details of contexts
type StorageInfoProvider interface {
GetStorageInfo(contextName string) StorageInfo
}
// Writer provides write access to context data
type Writer interface {
CreateOrUpdate(meta Metadata) error
Remove(name string) error
ResetTLSMaterial(name string, data *ContextTLSData) error
ResetEndpointTLSMaterial(contextName string, endpointName string, data *EndpointTLSData) error
}
// ReaderWriter combines Reader and Writer interfaces
type ReaderWriter interface {
Reader
Writer
}
// Metadata contains metadata about a context and its endpoints
type Metadata struct {
Name string `json:",omitempty"`
Metadata any `json:",omitempty"`
Endpoints map[string]any `json:",omitempty"`
}
// StorageInfo contains data about where a given context is stored
type StorageInfo struct {
MetadataPath string
TLSPath string
}
// EndpointTLSData represents tls data for a given endpoint
type EndpointTLSData struct {
Files map[string][]byte
}
// ContextTLSData represents tls data for a whole context
type ContextTLSData struct {
Endpoints map[string]EndpointTLSData
}
// New creates a store from a given directory.
// If the directory does not exist or is empty, it is initialized.
func New(dir string, cfg Config) *ContextStore {
metaRoot := filepath.Join(dir, metadataDir)
tlsRoot := filepath.Join(dir, tlsDir)
return &ContextStore{
meta: &metadataStore{
root: metaRoot,
config: cfg,
},
tls: &tlsStore{
root: tlsRoot,
},
}
}
// ContextStore implements Store.
type ContextStore struct {
meta *metadataStore
tls *tlsStore
}
// List returns all contexts.
func (s *ContextStore) List() ([]Metadata, error) {
return s.meta.list()
}
// Names returns Metadata names for a Lister.
func Names(s Lister) ([]string, error) {
list, err := s.List()
if err != nil {
return nil, err
}
names := make([]string, 0, len(list))
for _, item := range list {
names = append(names, item.Name)
}
return names, nil
}
// CreateOrUpdate creates or updates metadata for the context.
func (s *ContextStore) CreateOrUpdate(meta Metadata) error {
return s.meta.createOrUpdate(meta)
}
// Remove deletes the context with the given name, if found.
func (s *ContextStore) Remove(name string) error {
if err := s.meta.remove(name); err != nil {
return errors.Wrapf(err, "failed to remove context %s", name)
}
if err := s.tls.remove(name); err != nil {
return errors.Wrapf(err, "failed to remove context %s", name)
}
return nil
}
// GetMetadata returns the metadata for the context with the given name.
// It returns an errdefs.ErrNotFound if the context was not found.
func (s *ContextStore) GetMetadata(name string) (Metadata, error) {
return s.meta.get(name)
}
// ResetTLSMaterial removes TLS data for all endpoints in the context and replaces
// it with the new data.
func (s *ContextStore) ResetTLSMaterial(name string, data *ContextTLSData) error {
if err := s.tls.remove(name); err != nil {
return err
}
if data == nil {
return nil
}
for ep, files := range data.Endpoints {
for fileName, data := range files.Files {
if err := s.tls.createOrUpdate(name, ep, fileName, data); err != nil {
return err
}
}
}
return nil
}
// ResetEndpointTLSMaterial removes TLS data for the given context and endpoint,
// and replaces it with the new data.
func (s *ContextStore) ResetEndpointTLSMaterial(contextName string, endpointName string, data *EndpointTLSData) error {
if err := s.tls.removeEndpoint(contextName, endpointName); err != nil {
return err
}
if data == nil {
return nil
}
for fileName, data := range data.Files {
if err := s.tls.createOrUpdate(contextName, endpointName, fileName, data); err != nil {
return err
}
}
return nil
}
// ListTLSFiles returns the list of TLS files present for each endpoint in the
// context.
func (s *ContextStore) ListTLSFiles(name string) (map[string]EndpointFiles, error) {
return s.tls.listContextData(name)
}
// GetTLSData reads and returns the content of the given fileName for an endpoint.
// It returns an errdefs.ErrNotFound if the file was not found.
func (s *ContextStore) GetTLSData(contextName, endpointName, fileName string) ([]byte, error) {
return s.tls.getData(contextName, endpointName, fileName)
}
// GetStorageInfo returns the paths where the Metadata and TLS data are stored
// for the context.
func (s *ContextStore) GetStorageInfo(contextName string) StorageInfo {
return StorageInfo{
MetadataPath: s.meta.contextDir(contextdirOf(contextName)),
TLSPath: s.tls.contextDir(contextName),
}
}
// ValidateContextName checks a context name is valid.
func ValidateContextName(name string) error {
if name == "" {
return errors.New("context name cannot be empty")
}
if name == "default" {
return errors.New(`"default" is a reserved context name`)
}
if !restrictedNameRegEx.MatchString(name) {
return errors.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern)
}
return nil
}
// Export exports an existing context into an opaque data stream.
// This stream is actually a tarball containing context metadata and TLS materials, but it does
// not map 1:1 to the layout of the context store (don't try to restore it manually without calling store.Import).
func Export(name string, s Reader) io.ReadCloser {
reader, writer := io.Pipe()
go func() {
tw := tar.NewWriter(writer)
defer tw.Close()
defer writer.Close()
meta, err := s.GetMetadata(name)
if err != nil {
writer.CloseWithError(err)
return
}
metaBytes, err := json.Marshal(&meta)
if err != nil {
writer.CloseWithError(err)
return
}
if err = tw.WriteHeader(&tar.Header{
Name: metaFile,
Mode: 0o644,
Size: int64(len(metaBytes)),
}); err != nil {
writer.CloseWithError(err)
return
}
if _, err = tw.Write(metaBytes); err != nil {
writer.CloseWithError(err)
return
}
tlsFiles, err := s.ListTLSFiles(name)
if err != nil {
writer.CloseWithError(err)
return
}
if err = tw.WriteHeader(&tar.Header{
Name: "tls",
Mode: 0o700,
Size: 0,
Typeflag: tar.TypeDir,
}); err != nil {
writer.CloseWithError(err)
return
}
for endpointName, endpointFiles := range tlsFiles {
if err = tw.WriteHeader(&tar.Header{
Name: path.Join("tls", endpointName),
Mode: 0o700,
Size: 0,
Typeflag: tar.TypeDir,
}); err != nil {
writer.CloseWithError(err)
return
}
for _, fileName := range endpointFiles {
data, err := s.GetTLSData(name, endpointName, fileName)
if err != nil {
writer.CloseWithError(err)
return
}
if err = tw.WriteHeader(&tar.Header{
Name: path.Join("tls", endpointName, fileName),
Mode: 0o600,
Size: int64(len(data)),
}); err != nil {
writer.CloseWithError(err)
return
}
if _, err = tw.Write(data); err != nil {
writer.CloseWithError(err)
return
}
}
}
}()
return reader
}
const (
maxAllowedFileSizeToImport int64 = 10 << 20
zipType string = "application/zip"
)
func getImportContentType(r *bufio.Reader) (string, error) {
head, err := r.Peek(512)
if err != nil && err != io.EOF {
return "", err
}
return http.DetectContentType(head), nil
}
// Import imports an exported context into a store
func Import(name string, s Writer, reader io.Reader) error {
// Use a buffered reader so the content type can be determined by peeking without advancing the stream.
r := bufio.NewReader(reader)
importContentType, err := getImportContentType(r)
if err != nil {
return err
}
switch importContentType {
case zipType:
return importZip(name, s, r)
default:
// Assume it's a TAR (TAR does not have a "magic number")
return importTar(name, s, r)
}
}
func isValidFilePath(p string) error {
if p != metaFile && !strings.HasPrefix(p, "tls/") {
return errors.New("unexpected context file")
}
if path.Clean(p) != p {
return errors.New("unexpected path format")
}
if strings.Contains(p, `\`) {
return errors.New(`unexpected '\' in path`)
}
return nil
}
func importTar(name string, s Writer, reader io.Reader) error {
tr := tar.NewReader(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
tlsData := ContextTLSData{
Endpoints: map[string]EndpointTLSData{},
}
var importedMetaFile bool
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if hdr.Typeflag != tar.TypeReg {
// skip this entry, only taking files into account
continue
}
if err := isValidFilePath(hdr.Name); err != nil {
return errors.Wrap(err, hdr.Name)
}
if hdr.Name == metaFile {
data, err := io.ReadAll(tr)
if err != nil {
return err
}
meta, err := parseMetadata(data, name)
if err != nil {
return err
}
if err := s.CreateOrUpdate(meta); err != nil {
return err
}
importedMetaFile = true
} else if strings.HasPrefix(hdr.Name, "tls/") {
data, err := io.ReadAll(tr)
if err != nil {
return err
}
if err := importEndpointTLS(&tlsData, hdr.Name, data); err != nil {
return err
}
}
}
if !importedMetaFile {
return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
}
return s.ResetTLSMaterial(name, &tlsData)
}
func importZip(name string, s Writer, reader io.Reader) error {
body, err := io.ReadAll(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
if err != nil {
return err
}
zr, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
if err != nil {
return err
}
tlsData := ContextTLSData{
Endpoints: map[string]EndpointTLSData{},
}
var importedMetaFile bool
for _, zf := range zr.File {
fi := zf.FileInfo()
if !fi.Mode().IsRegular() {
// skip this entry, only taking regular files into account
continue
}
if err := isValidFilePath(zf.Name); err != nil {
return errors.Wrap(err, zf.Name)
}
if zf.Name == metaFile {
f, err := zf.Open()
if err != nil {
return err
}
data, err := io.ReadAll(&LimitedReader{R: f, N: maxAllowedFileSizeToImport})
defer f.Close()
if err != nil {
return err
}
meta, err := parseMetadata(data, name)
if err != nil {
return err
}
if err := s.CreateOrUpdate(meta); err != nil {
return err
}
importedMetaFile = true
} else if strings.HasPrefix(zf.Name, "tls/") {
f, err := zf.Open()
if err != nil {
return err
}
data, err := io.ReadAll(f)
defer f.Close()
if err != nil {
return err
}
err = importEndpointTLS(&tlsData, zf.Name, data)
if err != nil {
return err
}
}
}
if !importedMetaFile {
return errdefs.InvalidParameter(errors.New("invalid context: no metadata found"))
}
return s.ResetTLSMaterial(name, &tlsData)
}
func parseMetadata(data []byte, name string) (Metadata, error) {
var meta Metadata
if err := json.Unmarshal(data, &meta); err != nil {
return meta, err
}
if err := ValidateContextName(name); err != nil {
return Metadata{}, err
}
meta.Name = name
return meta, nil
}
func importEndpointTLS(tlsData *ContextTLSData, tlsPath string, data []byte) error {
parts := strings.SplitN(strings.TrimPrefix(tlsPath, "tls/"), "/", 2)
if len(parts) != 2 {
// TLS files must be archived in a two-level directory layout,
// i.e. tls/{endpointName}/{fileName}
return errors.New("archive format is invalid")
}
epName := parts[0]
fileName := parts[1]
if _, ok := tlsData.Endpoints[epName]; !ok {
tlsData.Endpoints[epName] = EndpointTLSData{
Files: map[string][]byte{},
}
}
tlsData.Endpoints[epName].Files[fileName] = data
return nil
}
type contextdir string
func contextdirOf(name string) contextdir {
return contextdir(digest.FromString(name).Encoded())
}

View File

@ -0,0 +1,56 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21

package store
// TypeGetter is a func used to determine the concrete type of a context or
// endpoint metadata by returning a pointer to an instance of the object,
// e.g. for a context of type DockerContext, the corresponding TypeGetter should return new(DockerContext).
type TypeGetter func() any
// NamedTypeGetter is a TypeGetter associated with a name
type NamedTypeGetter struct {
name string
typeGetter TypeGetter
}
// EndpointTypeGetter returns a NamedTypeGetter with the specified name and getter
func EndpointTypeGetter(name string, getter TypeGetter) NamedTypeGetter {
return NamedTypeGetter{
name: name,
typeGetter: getter,
}
}
// Config is used to configure the metadata marshaler of the context ContextStore
type Config struct {
contextType TypeGetter
endpointTypes map[string]TypeGetter
}
// SetEndpoint sets typing information for an endpoint.
func (c Config) SetEndpoint(name string, getter TypeGetter) {
c.endpointTypes[name] = getter
}
// ForeachEndpointType calls cb on every endpoint type registered with the Config
func (c Config) ForeachEndpointType(cb func(string, TypeGetter) error) error {
for n, ep := range c.endpointTypes {
if err := cb(n, ep); err != nil {
return err
}
}
return nil
}
// NewConfig creates a config object
func NewConfig(contextType TypeGetter, endpoints ...NamedTypeGetter) Config {
res := Config{
contextType: contextType,
endpointTypes: make(map[string]TypeGetter),
}
for _, e := range endpoints {
res.endpointTypes[e.name] = e.typeGetter
}
return res
}
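
Putting NewConfig and the ContextStore together, a minimal sketch with illustrative metadata types; DockerContext and EndpointSettings here are placeholders defined for the example, not the CLI's real types.

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/context/store"
)

// Illustrative metadata types registered with the store config.
type DockerContext struct{ Description string }

type EndpointSettings struct {
	Host          string
	SkipTLSVerify bool
}

func main() {
	dir, err := os.MkdirTemp("", "ctx-store")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(dir)

	cfg := store.NewConfig(
		func() any { return &DockerContext{} },
		store.EndpointTypeGetter("docker", func() any { return &EndpointSettings{} }),
	)
	s := store.New(dir, cfg)

	err = s.CreateOrUpdate(store.Metadata{
		Name:      "remote",
		Metadata:  DockerContext{Description: "example host"},
		Endpoints: map[string]any{"docker": EndpointSettings{Host: "ssh://me@host.example.com"}},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	meta, err := s.GetMetadata("remote")
	fmt.Printf("%+v %v\n", meta, err)
}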

View File

@ -0,0 +1,96 @@
package store
import (
"os"
"path/filepath"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/pkg/errors"
)
const tlsDir = "tls"
type tlsStore struct {
root string
}
func (s *tlsStore) contextDir(name string) string {
return filepath.Join(s.root, string(contextdirOf(name)))
}
func (s *tlsStore) endpointDir(name, endpointName string) string {
return filepath.Join(s.contextDir(name), endpointName)
}
func (s *tlsStore) createOrUpdate(name, endpointName, filename string, data []byte) error {
parentOfRoot := filepath.Dir(s.root)
if err := os.MkdirAll(parentOfRoot, 0o755); err != nil {
return err
}
endpointDir := s.endpointDir(name, endpointName)
if err := os.MkdirAll(endpointDir, 0o700); err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(endpointDir, filename), data, 0o600)
}
func (s *tlsStore) getData(name, endpointName, filename string) ([]byte, error) {
data, err := os.ReadFile(filepath.Join(s.endpointDir(name, endpointName), filename))
if err != nil {
if os.IsNotExist(err) {
return nil, errdefs.NotFound(errors.Errorf("TLS data for %s/%s/%s does not exist", name, endpointName, filename))
}
return nil, errors.Wrapf(err, "failed to read TLS data for endpoint %s", endpointName)
}
return data, nil
}
// remove deletes all TLS data for the given context.
func (s *tlsStore) remove(name string) error {
if err := os.RemoveAll(s.contextDir(name)); err != nil {
return errors.Wrapf(err, "failed to remove TLS data")
}
return nil
}
func (s *tlsStore) removeEndpoint(name, endpointName string) error {
if err := os.RemoveAll(s.endpointDir(name, endpointName)); err != nil {
return errors.Wrapf(err, "failed to remove TLS data for endpoint %s", endpointName)
}
return nil
}
func (s *tlsStore) listContextData(name string) (map[string]EndpointFiles, error) {
contextDir := s.contextDir(name)
epFSs, err := os.ReadDir(contextDir)
if err != nil {
if os.IsNotExist(err) {
return map[string]EndpointFiles{}, nil
}
return nil, errors.Wrapf(err, "failed to list TLS files for context %s", name)
}
r := make(map[string]EndpointFiles)
for _, epFS := range epFSs {
if epFS.IsDir() {
fss, err := os.ReadDir(filepath.Join(contextDir, epFS.Name()))
if os.IsNotExist(err) {
continue
}
if err != nil {
return nil, errors.Wrapf(err, "failed to list TLS files for endpoint %s", epFS.Name())
}
var files EndpointFiles
for _, fs := range fss {
if !fs.IsDir() {
files = append(files, fs.Name())
}
}
r[epFS.Name()] = files
}
}
return r, nil
}
// EndpointFiles is a slice of strings representing file names
type EndpointFiles []string

98
vendor/github.com/docker/cli/cli/context/tlsdata.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package context
import (
"os"
"github.com/docker/cli/cli/context/store"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
caKey = "ca.pem"
certKey = "cert.pem"
keyKey = "key.pem"
)
// TLSData holds ca/cert/key raw data
type TLSData struct {
CA []byte
Key []byte
Cert []byte
}
// ToStoreTLSData converts TLSData to the store representation
func (data *TLSData) ToStoreTLSData() *store.EndpointTLSData {
if data == nil {
return nil
}
result := store.EndpointTLSData{
Files: make(map[string][]byte),
}
if data.CA != nil {
result.Files[caKey] = data.CA
}
if data.Cert != nil {
result.Files[certKey] = data.Cert
}
if data.Key != nil {
result.Files[keyKey] = data.Key
}
return &result
}
// LoadTLSData loads TLS data from the store
func LoadTLSData(s store.Reader, contextName, endpointName string) (*TLSData, error) {
tlsFiles, err := s.ListTLSFiles(contextName)
if err != nil {
return nil, errors.Wrapf(err, "failed to retrieve TLS files for context %q", contextName)
}
if epTLSFiles, ok := tlsFiles[endpointName]; ok {
var tlsData TLSData
for _, f := range epTLSFiles {
data, err := s.GetTLSData(contextName, endpointName, f)
if err != nil {
return nil, errors.Wrapf(err, "failed to retrieve TLS data (%s) for context %q", f, contextName)
}
switch f {
case caKey:
tlsData.CA = data
case certKey:
tlsData.Cert = data
case keyKey:
tlsData.Key = data
default:
logrus.Warnf("unknown file in context %s TLS bundle: %s", contextName, f)
}
}
return &tlsData, nil
}
return nil, nil
}
// TLSDataFromFiles reads files into a TLSData struct (or returns nil if all paths are empty)
func TLSDataFromFiles(caPath, certPath, keyPath string) (*TLSData, error) {
var (
ca, cert, key []byte
err error
)
if caPath != "" {
if ca, err = os.ReadFile(caPath); err != nil {
return nil, err
}
}
if certPath != "" {
if cert, err = os.ReadFile(certPath); err != nil {
return nil, err
}
}
if keyPath != "" {
if key, err = os.ReadFile(keyPath); err != nil {
return nil, err
}
}
if ca == nil && cert == nil && key == nil {
return nil, nil
}
return &TLSData{CA: ca, Cert: cert, Key: key}, nil
}
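
A sketch tying this to the store types above; the PEM paths are placeholders and must exist for the call to succeed.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/context"
)

func main() {
	tlsData, err := context.TLSDataFromFiles("/tmp/ca.pem", "/tmp/cert.pem", "/tmp/key.pem")
	if err != nil {
		fmt.Println(err)
		return
	}
	if tlsData == nil {
		fmt.Println("no TLS material")
		return
	}
	// Convert to the representation consumed by store.ResetEndpointTLSMaterial.
	epTLS := tlsData.ToStoreTLSData()
	for name := range epTLS.Files {
		fmt.Println("loaded", name)
	}
}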

37
vendor/github.com/docker/cli/cli/debug/debug.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
package debug
import (
"os"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel"
)
// Enable sets the DEBUG env var to true
// and makes the logger log at debug level.
func Enable() {
os.Setenv("DEBUG", "1")
logrus.SetLevel(logrus.DebugLevel)
}
// Disable sets the DEBUG env var to the empty string
// and makes the logger log at info level.
func Disable() {
os.Setenv("DEBUG", "")
logrus.SetLevel(logrus.InfoLevel)
}
// IsEnabled checks whether the debug flag is set or not.
func IsEnabled() bool {
return os.Getenv("DEBUG") != ""
}
// OTELErrorHandler is an error handler for OTEL that
// uses the CLI debug package to log messages when an error
// occurs.
//
// The default is to log at the debug level, which is only
// enabled when debugging is enabled.
var OTELErrorHandler otel.ErrorHandler = otel.ErrorHandlerFunc(func(err error) {
logrus.WithError(err).Debug("otel error")
})
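
A sketch of how a CLI entry point might wire this up; otel.SetErrorHandler is a standard OpenTelemetry API, not something defined in this package.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/debug"
	"go.opentelemetry.io/otel"
)

func main() {
	debug.Enable()
	otel.SetErrorHandler(debug.OTELErrorHandler) // OTEL errors now go to the debug log
	fmt.Println("debug enabled:", debug.IsEnabled())
}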

33
vendor/github.com/docker/cli/cli/error.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package cli
import (
"fmt"
"strings"
)
// Errors is a list of errors.
// Useful in a loop when you don't want to return an error right away, but
// instead want to collect and display all the errors that happened during the loop.
type Errors []error
func (errList Errors) Error() string {
if len(errList) < 1 {
return ""
}
out := make([]string, len(errList))
for i := range errList {
out[i] = errList[i].Error()
}
return strings.Join(out, ", ")
}
// StatusError reports an unsuccessful exit by a command.
type StatusError struct {
Status string
StatusCode int
}
func (e StatusError) Error() string {
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
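
A small usage sketch of both types; the error messages and status values are placeholders.

package main

import (
	"errors"
	"fmt"

	"github.com/docker/cli/cli"
)

func main() {
	// Collect per-item failures in a loop and report them together.
	var errs cli.Errors
	for _, name := range []string{"one", "two"} {
		errs = append(errs, errors.New("failed to remove "+name))
	}
	if len(errs) > 0 {
		fmt.Println(errs.Error()) // failed to remove one, failed to remove two
	}
	fmt.Println(cli.StatusError{Status: "conflict", StatusCode: 1}.Error())
}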

148
vendor/github.com/docker/cli/cli/flags/options.go generated vendored Normal file
View File

@ -0,0 +1,148 @@
package flags
import (
"fmt"
"os"
"path/filepath"
"github.com/docker/cli/cli/config"
"github.com/docker/cli/opts"
"github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// EnvEnableTLS is the name of the environment variable that can be used
// to enable TLS for client connections. When set to a non-empty value, TLS
// is enabled for API connections using TCP. For backward-compatibility, this
// environment variable can only be used to enable TLS, not to disable it.
//
// Note that TLS is always enabled implicitly if the "--tlsverify" option
// or the "DOCKER_TLS_VERIFY" ([github.com/docker/docker/client.EnvTLSVerify])
// env var is set, either of which can be used to enable or disable TLS certificate
// validation. In both cases TLS is enabled, but, depending on the setting,
// verification may be disabled.
EnvEnableTLS = "DOCKER_TLS"
// DefaultCaFile is the default filename for the CA pem file
DefaultCaFile = "ca.pem"
// DefaultKeyFile is the default filename for the key pem file
DefaultKeyFile = "key.pem"
// DefaultCertFile is the default filename for the cert pem file
DefaultCertFile = "cert.pem"
// FlagTLSVerify is the flag name for the TLS verification option
FlagTLSVerify = "tlsverify"
// FormatHelp describes the --format flag behavior for list commands
FormatHelp = `Format output using a custom template:
'table': Print output in table format with column headers (default)
'table TEMPLATE': Print output in table format using the given Go template
'json': Print in JSON format
'TEMPLATE': Print output using the given Go template.
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates`
// InspectFormatHelp describes the --format flag behavior for inspect commands
InspectFormatHelp = `Format output using a custom template:
'json': Print in JSON format
'TEMPLATE': Print output using the given Go template.
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates`
)
var (
dockerCertPath = os.Getenv(client.EnvOverrideCertPath)
dockerTLSVerify = os.Getenv(client.EnvTLSVerify) != ""
dockerTLS = os.Getenv(EnvEnableTLS) != ""
)
// ClientOptions are the options used to configure the client cli.
type ClientOptions struct {
Debug bool
Hosts []string
LogLevel string
TLS bool
TLSVerify bool
TLSOptions *tlsconfig.Options
Context string
ConfigDir string
}
// NewClientOptions returns a new ClientOptions.
func NewClientOptions() *ClientOptions {
return &ClientOptions{}
}
// InstallFlags adds flags for the common options on the FlagSet
func (o *ClientOptions) InstallFlags(flags *pflag.FlagSet) {
configDir := config.Dir()
if dockerCertPath == "" {
dockerCertPath = configDir
}
flags.StringVar(&o.ConfigDir, "config", configDir, "Location of client config files")
flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode")
flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug", "info", "warn", "error", "fatal")`)
flags.BoolVar(&o.TLS, "tls", dockerTLS, "Use TLS; implied by --tlsverify")
flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote")
o.TLSOptions = &tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, DefaultCaFile),
CertFile: filepath.Join(dockerCertPath, DefaultCertFile),
KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile),
}
tlsOptions := o.TLSOptions
flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA")
flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file")
flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file")
// opts.ValidateHost is not used here, so as to allow connection helpers
hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, nil)
flags.VarP(hostOpt, "host", "H", "Daemon socket to connect to")
flags.StringVarP(&o.Context, "context", "c", "",
`Name of the context to use to connect to the daemon (overrides `+client.EnvOverrideHost+` env var and default context set with "docker context use")`)
}
// SetDefaultOptions sets default values for options after flag parsing is
// complete
func (o *ClientOptions) SetDefaultOptions(flags *pflag.FlagSet) {
// Regardless of whether the user sets it to true or false, if they
// specify --tlsverify at all then we need to turn on TLS.
// TLSVerify can be true even if not set, due to the DOCKER_TLS_VERIFY env var,
// so we need to check that here as well.
if flags.Changed(FlagTLSVerify) || o.TLSVerify {
o.TLS = true
}
if !o.TLS {
o.TLSOptions = nil
} else {
tlsOptions := o.TLSOptions
tlsOptions.InsecureSkipVerify = !o.TLSVerify
// Reset CertFile and KeyFile to empty string if the user did not specify
// the respective flags and the respective default files were not found.
if !flags.Changed("tlscert") {
if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) {
tlsOptions.CertFile = ""
}
}
if !flags.Changed("tlskey") {
if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) {
tlsOptions.KeyFile = ""
}
}
}
}
// SetLogLevel sets the logrus logging level
func SetLogLevel(logLevel string) {
if logLevel != "" {
lvl, err := logrus.ParseLevel(logLevel)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel)
os.Exit(1)
}
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
}
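
A sketch of how these options plug into a pflag.FlagSet the way the CLI does; the host value is a placeholder.

package main

import (
	"fmt"

	cliflags "github.com/docker/cli/cli/flags"
	"github.com/spf13/pflag"
)

func main() {
	clientOpts := cliflags.NewClientOptions()
	flags := pflag.NewFlagSet("docker", pflag.ContinueOnError)
	clientOpts.InstallFlags(flags)

	if err := flags.Parse([]string{"--tlsverify", "--host", "tcp://10.0.0.5:2376"}); err != nil {
		fmt.Println(err)
		return
	}
	clientOpts.SetDefaultOptions(flags)
	cliflags.SetLogLevel(clientOpts.LogLevel)

	// --tlsverify implies TLS, even without --tls.
	fmt.Println(clientOpts.TLS, clientOpts.TLSVerify, clientOpts.Hosts)
}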

Some files were not shown because too many files have changed in this diff