Fix dependencies: downgrade k8s.io/api, apimachinery, and client-go to v0.18.14 and revendor transitive packages

go.mod | 11
							| @ -4,7 +4,6 @@ go 1.15 | ||||
|  | ||||
| require ( | ||||
| 	cloud.google.com/go v0.74.0 // indirect | ||||
| 	github.com/go-logr/logr v0.3.0 // indirect | ||||
| 	github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 | ||||
| 	github.com/google/gofuzz v1.2.0 // indirect | ||||
| 	github.com/imdario/mergo v0.3.11 // indirect | ||||
| @ -16,11 +15,9 @@ require ( | ||||
| 	golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect | ||||
| 	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect | ||||
| 	gopkg.in/yaml.v2 v2.4.0 | ||||
| 	k8s.io/api v0.19.6 | ||||
| 	k8s.io/apimachinery v0.19.6 | ||||
| 	k8s.io/client-go v0.0.0-00010101000000-000000000000 | ||||
| 	k8s.io/api v0.18.14 | ||||
| 	k8s.io/apimachinery v0.18.14 | ||||
| 	k8s.io/client-go v0.18.14 | ||||
| ) | ||||
|  | ||||
| replace k8s.io/client-go => k8s.io/client-go v0.19.6 | ||||
|  | ||||
| replace github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.5.3 | ||||
| replace k8s.io/client-go => k8s.io/client-go v0.18.14 | ||||
|  | ||||
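For context, this change pins k8s.io/api, k8s.io/apimachinery, and k8s.io/client-go to v0.18.14 and repoints the client-go replace directive at the same version. Below is a minimal sketch of a consumer of the pinned client-go; the kubeconfig path and namespace are placeholder assumptions, and in the v0.18.x API the generated clientset calls already take a context.Context as their first argument.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; adjust for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// client-go v0.18.x signature: List(ctx, metav1.ListOptions).
	pods, err := cs.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pods\n", len(pods.Items))
}
```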
							
								
								
									
go.sum | 68
							| @ -7,7 +7,6 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK | ||||
| cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= | ||||
| cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= | ||||
| cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= | ||||
| cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= | ||||
| cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= | ||||
| cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= | ||||
| cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= | ||||
| @ -37,14 +36,10 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX | ||||
| cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= | ||||
| dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= | ||||
| github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= | ||||
| github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= | ||||
| github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= | ||||
| github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= | ||||
| github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= | ||||
| github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= | ||||
| github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= | ||||
| github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= | ||||
| github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= | ||||
| github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= | ||||
| github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= | ||||
| github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= | ||||
| @ -100,7 +95,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c | ||||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= | ||||
| github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= | ||||
| github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= | ||||
| github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= | ||||
| github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= | ||||
| github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= | ||||
| @ -119,7 +113,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv | ||||
| github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= | ||||
| github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= | ||||
| github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | ||||
| github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= | ||||
| github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= | ||||
| github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= | ||||
| github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= | ||||
| @ -132,10 +125,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 | ||||
| github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= | ||||
| github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= | ||||
| github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= | ||||
| github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= | ||||
| github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= | ||||
| github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= | ||||
| github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= | ||||
| github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= | ||||
| github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= | ||||
| github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= | ||||
| @ -163,6 +152,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt | ||||
| github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | ||||
| github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | ||||
| github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= | ||||
| github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
| github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
| github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
| github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= | ||||
| @ -217,8 +207,10 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ | ||||
| github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | ||||
| github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= | ||||
| github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= | ||||
| github.com/googleapis/gnostic v0.5.3 h1:2qsuRm+bzgwSIKikigPASa2GhW8H2Dn4Qq7UxD8K/48= | ||||
| github.com/googleapis/gnostic v0.5.3/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ= | ||||
| github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= | ||||
| github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI= | ||||
| github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= | ||||
| github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= | ||||
| github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= | ||||
| github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= | ||||
| github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= | ||||
| @ -278,8 +270,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv | ||||
| github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= | ||||
| github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= | ||||
| github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= | ||||
| github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= | ||||
| github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= | ||||
| github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= | ||||
| github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= | ||||
| github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= | ||||
| @ -406,7 +396,6 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= | ||||
| github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= | ||||
| github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= | ||||
| github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||
| github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= | ||||
| github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= | ||||
| github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= | ||||
| github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= | ||||
| @ -445,12 +434,13 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= | ||||
| go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= | ||||
| golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||
| golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= | ||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= | ||||
| @ -488,6 +478,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB | ||||
| golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| @ -509,6 +500,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL | ||||
| golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| @ -527,7 +519,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R | ||||
| golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= | ||||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | ||||
| golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= | ||||
| golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | ||||
| @ -552,6 +543,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ | ||||
| golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= | ||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| @ -560,6 +552,7 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h | ||||
| golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| @ -572,7 +565,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w | ||||
| golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| @ -596,7 +589,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w | ||||
| golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= | ||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| @ -607,6 +599,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4 | ||||
| golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||
| golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= | ||||
| golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||
| golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| @ -742,7 +735,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D | ||||
| google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| @ -804,8 +796,6 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= | ||||
| gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= | ||||
| gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= | ||||
| gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= | ||||
| gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | ||||
| honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | ||||
| honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= | ||||
| @ -814,24 +804,26 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh | ||||
| honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= | ||||
| honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= | ||||
| honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= | ||||
| k8s.io/api v0.19.6 h1:F3lfwgpKcKms6F1mMqkQXFzXmme8QqHTJBtBkev3TOg= | ||||
| k8s.io/api v0.19.6/go.mod h1:Plxx44Nh4zVblkJrIgxVPgPre1mvng6tXf1Sj3bs0fU= | ||||
| k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA= | ||||
| k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= | ||||
| k8s.io/client-go v0.19.6 h1:vtPb33nP8DBMW+/CyuJ8fiie36c3CM1Ts6L4Tsr+PtU= | ||||
| k8s.io/client-go v0.19.6/go.mod h1:gEiS+efRlXYUEQ9Oz4lmNXlxAl5JZ8y2zbTDGhvXXnk= | ||||
| k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= | ||||
| k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= | ||||
| k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= | ||||
| k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= | ||||
| k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= | ||||
| k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= | ||||
| k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= | ||||
| k8s.io/api v0.18.14 h1:tKRYsRhfL7Hfs60rFm8sNdhWydDuk7vnBqnt8uy+i/Q= | ||||
| k8s.io/api v0.18.14/go.mod h1:rMEP0KbqUY9Bm/nbQBXtUizL9r7XvD7IV1XhnGSHsy4= | ||||
| k8s.io/apimachinery v0.18.14 h1:wH0doJJajeG0qIuQD1/yo5JrBDAsZ3olqlNXZBiauVw= | ||||
| k8s.io/apimachinery v0.18.14/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEXk= | ||||
| k8s.io/client-go v0.18.14 h1:9dWb5D0dBsuc2umLPuWVE07rPDmBNsggW3vvctDyJII= | ||||
| k8s.io/client-go v0.18.14/go.mod h1:fpZHBter1MB6bs+GISolsmIRsGlBEJyd0mllE0H9f2Y= | ||||
| k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= | ||||
| k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= | ||||
| k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= | ||||
| k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= | ||||
| k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= | ||||
| k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= | ||||
| k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= | ||||
| k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= | ||||
| rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= | ||||
| rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= | ||||
| rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= | ||||
| sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= | ||||
| sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= | ||||
| sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= | ||||
| sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= | ||||
| sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= | ||||
| sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= | ||||
| sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= | ||||
| sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= | ||||
|  | ||||
							
								
								
									
vendor/github.com/go-logr/logr/README.md | 183 (generated, vendored)
							| @ -1,183 +0,0 @@ | ||||
| # A more minimal logging API for Go | ||||
|  | ||||
| Before you consider this package, please read [this blog post by the | ||||
| inimitable Dave Cheney][warning-makes-no-sense].  I really appreciate what | ||||
| he has to say, and it largely aligns with my own experiences.  Too many | ||||
| choices of levels means inconsistent logs. | ||||
|  | ||||
| This package offers a purely abstract interface, based on these ideas but with | ||||
| a few twists.  Code can depend on just this interface and have the actual | ||||
| logging implementation be injected from callers.  Ideally only `main()` knows | ||||
| what logging implementation is being used. | ||||
|  | ||||
| # Differences from Dave's ideas | ||||
|  | ||||
| The main differences are: | ||||
|  | ||||
| 1) Dave basically proposes doing away with the notion of a logging API in favor | ||||
| of `fmt.Printf()`.  I disagree, especially when you consider things like output | ||||
| locations, timestamps, file and line decorations, and structured logging.  I | ||||
| restrict the API to just 2 types of logs: info and error. | ||||
|  | ||||
| Info logs are things you want to tell the user which are not errors.  Error | ||||
| logs are, well, errors.  If your code receives an `error` from a subordinate | ||||
| function call and is logging that `error` *and not returning it*, use error | ||||
| logs. | ||||
|  | ||||
| 2) Verbosity-levels on info logs.  This gives developers a chance to indicate | ||||
| arbitrary grades of importance for info logs, without assigning names with | ||||
| semantic meaning such as "warning", "trace", and "debug".  Superficially this | ||||
| may feel very similar, but the primary difference is the lack of semantics. | ||||
| Because verbosity is a numerical value, it's safe to assume that an app running | ||||
| with higher verbosity means more (and less important) logs will be generated. | ||||
|  | ||||
| This is a BETA grade API. | ||||
|  | ||||
| There are implementations for the following logging libraries: | ||||
|  | ||||
| - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) | ||||
| - **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) | ||||
| - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) | ||||
| - **log** (the Go standard library logger): | ||||
|   [stdr](https://github.com/go-logr/stdr) | ||||
| - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) | ||||
| - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) | ||||
| - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) | ||||
|  | ||||
| # FAQ | ||||
|  | ||||
| ## Conceptual | ||||
|  | ||||
| ## Why structured logging? | ||||
|  | ||||
| - **Structured logs are more easily queriable**: Since you've got | ||||
|   key-value pairs, it's much easier to query your structured logs for | ||||
|   particular values by filtering on the contents of a particular key -- | ||||
|   think searching request logs for error codes, Kubernetes reconcilers for | ||||
|   the name and namespace of the reconciled object, etc | ||||
|  | ||||
| - **Structured logging makes it easier to have cross-referencable logs**: | ||||
|   Similarly to searchability, if you maintain conventions around your | ||||
|   keys, it becomes easy to gather all log lines related to a particular | ||||
|   concept. | ||||
|   | ||||
| - **Structured logs allow better dimensions of filtering**: if you have | ||||
|   structure to your logs, you've got more precise control over how much | ||||
|   information is logged -- you might choose in a particular configuration | ||||
|   to log certain keys but not others, only log lines where a certain key | ||||
|   matches a certain value, etc, instead of just having v-levels and names | ||||
|   to key off of. | ||||
|  | ||||
| - **Structured logs better represent structured data**: sometimes, the | ||||
|   data that you want to log is inherently structured (think tuple-link | ||||
|   objects).  Structured logs allow you to preserve that structure when | ||||
|   outputting. | ||||
|  | ||||
| ## Why V-levels? | ||||
|  | ||||
| **V-levels give operators an easy way to control the chattiness of log | ||||
| operations**.  V-levels provide a way for a given package to distinguish | ||||
| the relative importance or verbosity of a given log message.  Then, if | ||||
| a particular logger or package is logging too many messages, the user | ||||
| of the package can simply change the v-levels for that library.  | ||||
|  | ||||
| ## Why not more named levels, like Warning? | ||||
|  | ||||
| Read [Dave Cheney's post][warning-makes-no-sense].  Then read [Differences | ||||
| from Dave's ideas](#differences-from-daves-ideas). | ||||
|  | ||||
| ## Why not allow format strings, too? | ||||
|  | ||||
| **Format strings negate many of the benefits of structured logs**: | ||||
|  | ||||
| - They're not easily searchable without resorting to fuzzy searching, | ||||
|   regular expressions, etc | ||||
|  | ||||
| - They don't store structured data well, since contents are flattened into | ||||
|   a string | ||||
|  | ||||
| - They're not cross-referencable | ||||
|  | ||||
| - They don't compress easily, since the message is not constant | ||||
|  | ||||
| (unless you turn positional parameters into key-value pairs with numerical | ||||
| keys, at which point you've gotten key-value logging with meaningless | ||||
| keys) | ||||
|  | ||||
| ## Practical | ||||
|  | ||||
| ## Why key-value pairs, and not a map? | ||||
|  | ||||
| Key-value pairs are *much* easier to optimize, especially around | ||||
| allocations.  Zap (a structured logger that inspired logr's interface) has | ||||
| [performance measurements](https://github.com/uber-go/zap#performance) | ||||
| that show this quite nicely. | ||||
|  | ||||
| While the interface ends up being a little less obvious, you get | ||||
| potentially better performance, plus avoid making users type | ||||
| `map[string]string{}` every time they want to log. | ||||
|  | ||||
| ## What if my V-levels differ between libraries? | ||||
|  | ||||
| That's fine.  Control your V-levels on a per-logger basis, and use the | ||||
| `WithName` function to pass different loggers to different libraries. | ||||
|  | ||||
| Generally, you should take care to ensure that you have relatively | ||||
| consistent V-levels within a given logger, however, as this makes deciding | ||||
| on what verbosity of logs to request easier. | ||||
|  | ||||
| ## But I *really* want to use a format string! | ||||
|  | ||||
| That's not actually a question.  Assuming your question is "how do | ||||
| I convert my mental model of logging with format strings to logging with | ||||
| constant messages": | ||||
|  | ||||
| 1. figure out what the error actually is, as you'd write in a TL;DR style, | ||||
|    and use that as a message | ||||
|  | ||||
| 2. For every place you'd write a format specifier, look to the word before | ||||
|    it, and add that as a key value pair | ||||
|  | ||||
| For instance, consider the following examples (all taken from spots in the | ||||
| Kubernetes codebase): | ||||
|  | ||||
| - `klog.V(4).Infof("Client is returning errors: code %v, error %v", | ||||
|   responseCode, err)` becomes `logger.Error(err, "client returned an | ||||
|   error", "code", responseCode)` | ||||
|  | ||||
| - `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", | ||||
|   seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after | ||||
|   response when requesting url", "attempt", retries, "after | ||||
|   seconds", seconds, "url", url)` | ||||
|  | ||||
| If you *really* must use a format string, place it as a key value, and | ||||
| call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to | ||||
| reflect over type %T")` becomes `logger.Info("unable to reflect over | ||||
| type", "type", fmt.Sprintf("%T"))`.  In general though, the cases where | ||||
| this is necessary should be few and far between. | ||||
|  | ||||
| ## How do I choose my V-levels? | ||||
|  | ||||
| This is basically the only hard constraint: increase V-levels to denote | ||||
| more verbose or more debug-y logs. | ||||
|  | ||||
| Otherwise, you can start out with `0` as "you always want to see this", | ||||
| `1` as "common logging that you might *possibly* want to turn off", and | ||||
| `10` as "I would like to performance-test your log collection stack". | ||||
|  | ||||
| Then gradually choose levels in between as you need them, working your way | ||||
| down from 10 (for debug and trace style logs) and up from 1 (for chattier | ||||
| info-type logs). | ||||
|  | ||||
| ## How do I choose my keys | ||||
|  | ||||
| - make your keys human-readable | ||||
| - constant keys are generally a good idea | ||||
| - be consistent across your codebase | ||||
| - keys should naturally match parts of the message string | ||||
|  | ||||
| While key names are mostly unrestricted (and spaces are acceptable), | ||||
| it's generally a good idea to stick to printable ascii characters, or at | ||||
| least match the general character set of your log lines. | ||||
|  | ||||
| [warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging | ||||
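The README above describes logr's structured style: constant messages, alternating key/value pairs, numeric V-levels, and a dedicated Error call. A compilable sketch of that style follows; it uses the no-op Discard() logger from discard.go (further down in this diff) as a stand-in backend, but any implementation listed above (zapr, klogr, stdr, ...) satisfies the same interface.

```go
package main

import (
	"errors"

	"github.com/go-logr/logr"
)

func main() {
	// Discard() (see discard.go below) is a no-op backend; real backends such
	// as zapr or klogr return a logr.Logger the same way.
	log := logr.Discard().WithName("reconcilers").WithValues("target-type", "Foo")

	// Info: constant message plus alternating key/value pairs.
	log.Info("setting foo on object", "value", 42, "object", "default/foo")

	// V-levels are numeric and additive; higher means less important.
	log.V(4).Info("got a retry-after response when requesting url",
		"attempt", 3, "after seconds", 10, "url", "https://example.com")

	// Errors go through Error so the standard "error" key stays consistent.
	log.Error(errors.New("boom"), "unable to reconcile object", "object", "default/foo")
}
```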
							
								
								
									
vendor/github.com/go-logr/logr/discard.go | 35 (generated, vendored)
							| @ -1,35 +0,0 @@ | ||||
| package logr | ||||
|  | ||||
| // Discard returns a valid Logger that discards all messages logged to it. | ||||
| // It can be used whenever the caller is not interested in the logs. | ||||
| func Discard() Logger { | ||||
| 	return discardLogger{} | ||||
| } | ||||
|  | ||||
| // discardLogger is a Logger that discards all messages. | ||||
| type discardLogger struct{} | ||||
|  | ||||
| func (l discardLogger) Enabled() bool { | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (l discardLogger) Info(msg string, keysAndValues ...interface{}) { | ||||
| } | ||||
|  | ||||
| func (l discardLogger) Error(err error, msg string, keysAndValues ...interface{}) { | ||||
| } | ||||
|  | ||||
| func (l discardLogger) V(level int) Logger { | ||||
| 	return l | ||||
| } | ||||
|  | ||||
| func (l discardLogger) WithValues(keysAndValues ...interface{}) Logger { | ||||
| 	return l | ||||
| } | ||||
|  | ||||
| func (l discardLogger) WithName(name string) Logger { | ||||
| 	return l | ||||
| } | ||||
|  | ||||
| // Verify that it actually implements the interface | ||||
| var _ Logger = discardLogger{} | ||||
							
								
								
									
vendor/github.com/go-logr/logr/go.mod | 3 (generated, vendored)
							| @ -1,3 +0,0 @@ | ||||
| module github.com/go-logr/logr | ||||
|  | ||||
| go 1.14 | ||||
							
								
								
									
vendor/github.com/go-logr/logr/logr.go | 222 (generated, vendored)
							| @ -1,222 +0,0 @@ | ||||
| /* | ||||
| Copyright 2019 The logr Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| // Package logr defines abstract interfaces for logging.  Packages can depend on | ||||
| // these interfaces and callers can implement logging in whatever way is | ||||
| // appropriate. | ||||
| // | ||||
| // This design derives from Dave Cheney's blog: | ||||
| //     http://dave.cheney.net/2015/11/05/lets-talk-about-logging | ||||
| // | ||||
| // This is a BETA grade API.  Until there is a significant 2nd implementation, | ||||
| // I don't really know how it will change. | ||||
| // | ||||
| // The logging specifically makes it non-trivial to use format strings, to encourage | ||||
| // attaching structured information instead of unstructured format strings. | ||||
| // | ||||
| // Usage | ||||
| // | ||||
| // Logging is done using a Logger.  Loggers can have name prefixes and named | ||||
| // values attached, so that all log messages logged with that Logger have some | ||||
| // base context associated. | ||||
| // | ||||
| // The term "key" is used to refer to the name associated with a particular | ||||
| // value, to disambiguate it from the general Logger name. | ||||
| // | ||||
| // For instance, suppose we're trying to reconcile the state of an object, and | ||||
| // we want to log that we've made some decision. | ||||
| // | ||||
| // With the traditional log package, we might write: | ||||
| //   log.Printf("decided to set field foo to value %q for object %s/%s", | ||||
| //       targetValue, object.Namespace, object.Name) | ||||
| // | ||||
| // With logr's structured logging, we'd write: | ||||
| //   // elsewhere in the file, set up the logger to log with the prefix of | ||||
| //   // "reconcilers", and the named value target-type=Foo, for extra context. | ||||
| //   log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") | ||||
| // | ||||
| //   // later on... | ||||
| //   log.Info("setting foo on object", "value", targetValue, "object", object) | ||||
| // | ||||
| // Depending on our logging implementation, we could then make logging decisions | ||||
| // based on field values (like only logging such events for objects in a certain | ||||
| // namespace), or copy the structured information into a structured log store. | ||||
| // | ||||
| // For logging errors, Logger has a method called Error.  Suppose we wanted to | ||||
| // log an error while reconciling.  With the traditional log package, we might | ||||
| // write: | ||||
| //   log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) | ||||
| // | ||||
| // With logr, we'd instead write: | ||||
| //   // assuming the above setup for log | ||||
| //   log.Error(err, "unable to reconcile object", "object", object) | ||||
| // | ||||
| // This functions similarly to: | ||||
| //   log.Info("unable to reconcile object", "error", err, "object", object) | ||||
| // | ||||
| // However, it ensures that a standard key for the error value ("error") is used | ||||
| // across all error logging.  Furthermore, certain implementations may choose to | ||||
| // attach additional information (such as stack traces) on calls to Error, so | ||||
| // it's preferred to use Error to log errors. | ||||
| // | ||||
| // Parts of a log line | ||||
| // | ||||
| // Each log message from a Logger has four types of context: | ||||
| // logger name, log verbosity, log message, and the named values. | ||||
| // | ||||
| // The Logger name consists of a series of name "segments" added by successive | ||||
| // calls to WithName.  These name segments will be joined in some way by the | ||||
| // underlying implementation.  It is strongly recommended that name segments | ||||
| // contain simple identifiers (letters, digits, and hyphen), and do not contain | ||||
| // characters that could muddle the log output or confuse the joining operation | ||||
| // (e.g.  whitespace, commas, periods, slashes, brackets, quotes, etc). | ||||
| // | ||||
| // Log verbosity represents how little a log matters.  Level zero, the default, | ||||
| // matters most.  Increasing levels matter less and less.  Try to avoid lots of | ||||
| // different verbosity levels, and instead provide useful keys, logger names, | ||||
| // and log messages for users to filter on.  It's illegal to pass a log level | ||||
| // below zero. | ||||
| // | ||||
| // The log message consists of a constant message attached to the log line. | ||||
| // This should generally be a simple description of what's occurring, and should | ||||
| // never be a format string. | ||||
| // | ||||
| // Variable information can then be attached using named values (key/value | ||||
| // pairs).  Keys are arbitrary strings, while values may be any Go value. | ||||
| // | ||||
| // Key Naming Conventions | ||||
| // | ||||
| // Keys are not strictly required to conform to any specification or regex, but | ||||
| // it is recommended that they: | ||||
| //   * be human-readable and meaningful (not auto-generated or simple ordinals) | ||||
| //   * be constant (not dependent on input data) | ||||
| //   * contain only printable characters | ||||
| //   * not contain whitespace or punctuation | ||||
| // | ||||
| // These guidelines help ensure that log data is processed properly regardless | ||||
| // of the log implementation.  For example, log implementations will try to | ||||
| // output JSON data or will store data for later database (e.g. SQL) queries. | ||||
| // | ||||
| // While users are generally free to use key names of their choice, it's | ||||
| // generally best to avoid using the following keys, as they're frequently used | ||||
| // by implementations: | ||||
| // | ||||
| // - `"caller"`: the calling information (file/line) of a particular log line. | ||||
| // - `"error"`: the underlying error value in the `Error` method. | ||||
| // - `"level"`: the log level. | ||||
| // - `"logger"`: the name of the associated logger. | ||||
| // - `"msg"`: the log message. | ||||
| // - `"stacktrace"`: the stack trace associated with a particular log line or | ||||
| //                   error (often from the `Error` message). | ||||
| // - `"ts"`: the timestamp for a log line. | ||||
| // | ||||
| // Implementations are encouraged to make use of these keys to represent the | ||||
| // above concepts, when necessary (for example, in a pure-JSON output form, it | ||||
| // would be necessary to represent at least message and timestamp as ordinary | ||||
| // named values). | ||||
| // | ||||
| // Implementations may choose to give callers access to the underlying | ||||
| // logging implementation.  The recommended pattern for this is: | ||||
| //   // Underlier exposes access to the underlying logging implementation. | ||||
| //   // Since callers only have a logr.Logger, they have to know which | ||||
| //   // implementation is in use, so this interface is less of an abstraction | ||||
| //   // and more of way to test type conversion. | ||||
| //   type Underlier interface { | ||||
| //       GetUnderlying() <underlying-type> | ||||
| //   } | ||||
| package logr | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| ) | ||||
|  | ||||
| // TODO: consider adding back in format strings if they're really needed | ||||
| // TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) | ||||
| // TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats | ||||
|  | ||||
| // Logger represents the ability to log messages, both errors and not. | ||||
| type Logger interface { | ||||
| 	// Enabled tests whether this Logger is enabled.  For example, commandline | ||||
| 	// flags might be used to set the logging verbosity and disable some info | ||||
| 	// logs. | ||||
| 	Enabled() bool | ||||
|  | ||||
| 	// Info logs a non-error message with the given key/value pairs as context. | ||||
| 	// | ||||
| 	// The msg argument should be used to add some constant description to | ||||
| 	// the log line.  The key/value pairs can then be used to add additional | ||||
| 	// variable information.  The key/value pairs should alternate string | ||||
| 	// keys and arbitrary values. | ||||
| 	Info(msg string, keysAndValues ...interface{}) | ||||
|  | ||||
| 	// Error logs an error, with the given message and key/value pairs as context. | ||||
| 	// It functions similarly to calling Info with the "error" named value, but may | ||||
| 	// have unique behavior, and should be preferred for logging errors (see the | ||||
| 	// package documentations for more information). | ||||
| 	// | ||||
| 	// The msg field should be used to add context to any underlying error, | ||||
| 	// while the err field should be used to attach the actual error that | ||||
| 	// triggered this log line, if present. | ||||
| 	Error(err error, msg string, keysAndValues ...interface{}) | ||||
|  | ||||
| 	// V returns an Logger value for a specific verbosity level, relative to | ||||
| 	// this Logger.  In other words, V values are additive.  V higher verbosity | ||||
| 	// level means a log message is less important.  It's illegal to pass a log | ||||
| 	// level less than zero. | ||||
| 	V(level int) Logger | ||||
|  | ||||
| 	// WithValues adds some key-value pairs of context to a logger. | ||||
| 	// See Info for documentation on how key/value pairs work. | ||||
| 	WithValues(keysAndValues ...interface{}) Logger | ||||
|  | ||||
| 	// WithName adds a new element to the logger's name. | ||||
| 	// Successive calls with WithName continue to append | ||||
| 	// suffixes to the logger's name.  It's strongly recommended | ||||
| 	// that name segments contain only letters, digits, and hyphens | ||||
| 	// (see the package documentation for more information). | ||||
| 	WithName(name string) Logger | ||||
| } | ||||
|  | ||||
| // InfoLogger provides compatibility with code that relies on the v0.1.0 interface | ||||
| // Deprecated: use Logger instead. This will be removed in a future release. | ||||
| type InfoLogger = Logger | ||||
|  | ||||
| type contextKey struct{} | ||||
|  | ||||
| // FromContext returns a Logger constructed from ctx or nil if no | ||||
| // logger details are found. | ||||
| func FromContext(ctx context.Context) Logger { | ||||
| 	if v, ok := ctx.Value(contextKey{}).(Logger); ok { | ||||
| 		return v | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // FromContextOrDiscard returns a Logger constructed from ctx or a Logger | ||||
| // that discards all messages if no logger details are found. | ||||
| func FromContextOrDiscard(ctx context.Context) Logger { | ||||
| 	if v, ok := ctx.Value(contextKey{}).(Logger); ok { | ||||
| 		return v | ||||
| 	} | ||||
|  | ||||
| 	return discardLogger{} | ||||
| } | ||||
|  | ||||
| // NewContext returns a new context derived from ctx that embeds the Logger. | ||||
| func NewContext(ctx context.Context, l Logger) context.Context { | ||||
| 	return context.WithValue(ctx, contextKey{}, l) | ||||
| } | ||||
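The package documentation above also shows context-based logger passing via NewContext and FromContextOrDiscard. A minimal sketch of that pattern, again with Discard() standing in for a real backend:

```go
package main

import (
	"context"

	"github.com/go-logr/logr"
)

// doWork retrieves the Logger carried by ctx, falling back to a discarding
// logger when none was attached.
func doWork(ctx context.Context) {
	logr.FromContextOrDiscard(ctx).Info("doing work")
}

func main() {
	// NewContext embeds the Logger in a derived context; Discard() stands in
	// for a real logging backend here.
	ctx := logr.NewContext(context.Background(), logr.Discard())
	doWork(ctx)
}
```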
							
								
								
									
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 3837 (generated, vendored; diff suppressed because it is too large)
							
								
								
									
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go | 8291 (generated, vendored; diff suppressed because it is too large)
							
								
								
									
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto | 7 (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2020 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| @ -41,9 +41,6 @@ option java_package = "org.openapi_v2"; | ||||
| // the future. 'GPB' is reserved for the protocol buffer implementation itself. | ||||
| option objc_class_prefix = "OAS"; | ||||
|  | ||||
| // The Go package name. | ||||
| option go_package = "openapiv2;openapi_v2"; | ||||
|  | ||||
| message AdditionalPropertiesItem { | ||||
|   oneof oneof { | ||||
|     Schema schema = 1; | ||||
| @ -556,7 +553,7 @@ message Response { | ||||
|   repeated NamedAny vendor_extension = 5; | ||||
| } | ||||
|  | ||||
| // One or more JSON representations for responses | ||||
| // One or more JSON representations for parameters | ||||
| message ResponseDefinitions { | ||||
|   repeated NamedResponse additional_properties = 1; | ||||
| } | ||||
|  | ||||
							
								
								
									
vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md | 20 (generated, vendored)
							| @ -1,14 +1,16 @@ | ||||
| # OpenAPI v2 Protocol Buffer Models | ||||
|  | ||||
| This directory contains a Protocol Buffer-language model and related code for | ||||
| supporting OpenAPI v2. | ||||
| This directory contains a Protocol Buffer-language model | ||||
| and related code for supporting OpenAPI v2. | ||||
|  | ||||
| Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol | ||||
| Buffer support code for their preferred languages. | ||||
| Gnostic applications and plugins can use OpenAPIv2.proto | ||||
| to generate Protocol Buffer support code for their preferred languages. | ||||
|  | ||||
| OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into | ||||
| the Protocol Buffer-based datastructures generated from OpenAPIv2.proto. | ||||
| OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI  | ||||
| descriptions into the Protocol Buffer-based datastructures  | ||||
| generated from OpenAPIv2.proto. | ||||
|  | ||||
| OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler | ||||
| generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer | ||||
| compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. | ||||
| OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic  | ||||
| compiler generator, and OpenAPIv2.pb.go is generated by  | ||||
| protoc, the Protocol Buffer compiler, and protoc-gen-go, the | ||||
| Protocol Buffer Go code generation plugin. | ||||
|  | ||||
							
								
								
									
vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go | 41 (generated, vendored)
							| @ -1,41 +0,0 @@ | ||||
| // Copyright 2020 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package openapi_v2 | ||||
|  | ||||
| import ( | ||||
| 	"github.com/googleapis/gnostic/compiler" | ||||
| 	"gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| // ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. | ||||
| func ParseDocument(b []byte) (*Document, error) { | ||||
| 	info, err := compiler.ReadInfoFromBytes("", b) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	root := info.Content[0] | ||||
| 	return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil)) | ||||
| } | ||||
|  | ||||
| // YAMLValue produces a serialized YAML representation of the document. | ||||
| func (d *Document) YAMLValue(comment string) ([]byte, error) { | ||||
| 	rawInfo := d.ToRawInfo() | ||||
| 	rawInfo = &yaml.Node{ | ||||
| 		Kind:        yaml.DocumentNode, | ||||
| 		Content:     []*yaml.Node{rawInfo}, | ||||
| 		HeadComment: comment, | ||||
| 	} | ||||
| 	return yaml.Marshal(rawInfo) | ||||
| } | ||||
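For context, the file deleted above defined gnostic's newer yaml.v3-based entry points. A minimal sketch of how that removed API would be called; the input path and head comment are illustrative assumptions, and the import path follows the vendored directory shown above:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	// Hypothetical description file; the path is an illustration only.
	b, err := ioutil.ReadFile("petstore.yaml")
	if err != nil {
		log.Fatal(err)
	}
	// ParseDocument reads a YAML/JSON OpenAPI v2 description (API removed by this downgrade).
	doc, err := openapi_v2.ParseDocument(b)
	if err != nil {
		log.Fatal(err)
	}
	// YAMLValue re-serializes the document with a head comment.
	out, err := doc.YAMLValue("# regenerated example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```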
							
								
								
									
4  vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json  (generated, vendored)
							| @ -203,7 +203,7 @@ | ||||
|       "additionalProperties": { | ||||
|         "$ref": "#/definitions/response" | ||||
|       }, | ||||
|       "description": "One or more JSON representations for responses" | ||||
|       "description": "One or more JSON representations for parameters" | ||||
|     }, | ||||
|     "externalDocs": { | ||||
|       "type": "object", | ||||
| @ -1607,4 +1607,4 @@ | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| } | ||||
| } | ||||
|  | ||||
							
								
								
									
3  vendor/github.com/googleapis/gnostic/compiler/README.md  (generated, vendored)
							| @ -1,4 +1,3 @@ | ||||
| # Compiler support code | ||||
|  | ||||
| This directory contains compiler support code used by Gnostic and Gnostic | ||||
| extensions. | ||||
| This directory contains compiler support code used by Gnostic and Gnostic extensions. | ||||
							
								
								
									
20  vendor/github.com/googleapis/gnostic/compiler/context.go  (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| @ -14,36 +14,30 @@ | ||||
|  | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	yaml "gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| // Context contains state of the compiler as it traverses a document. | ||||
| type Context struct { | ||||
| 	Parent            *Context | ||||
| 	Name              string | ||||
| 	Node              *yaml.Node | ||||
| 	ExtensionHandlers *[]ExtensionHandler | ||||
| } | ||||
|  | ||||
| // NewContextWithExtensions returns a new object representing the compiler state | ||||
| func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { | ||||
| 	return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers} | ||||
| func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { | ||||
| 	return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} | ||||
| } | ||||
|  | ||||
| // NewContext returns a new object representing the compiler state | ||||
| func NewContext(name string, node *yaml.Node, parent *Context) *Context { | ||||
| func NewContext(name string, parent *Context) *Context { | ||||
| 	if parent != nil { | ||||
| 		return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} | ||||
| 		return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} | ||||
| 	} | ||||
| 	return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} | ||||
| } | ||||
|  | ||||
| // Description returns a text description of the compiler state | ||||
| func (context *Context) Description() string { | ||||
| 	name := context.Name | ||||
| 	if context.Parent != nil { | ||||
| 		name = context.Parent.Description() + "." + name | ||||
| 		return context.Parent.Description() + "." + context.Name | ||||
| 	} | ||||
| 	return name | ||||
| 	return context.Name | ||||
| } | ||||
|  | ||||
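A small sketch of the downgraded, node-less Context API restored above; the names are made up and only illustrate how Description walks the parent chain:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Build a chain of compiler contexts; children inherit extension handlers.
	root := compiler.NewContextWithExtensions("$root", nil, nil)
	paths := compiler.NewContext("paths", root)
	pets := compiler.NewContext("/pets", paths)
	// Description concatenates names up the parent chain: "$root.paths./pets".
	fmt.Println(pets.Description())
}
```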
							
								
								
									
15  vendor/github.com/googleapis/gnostic/compiler/error.go  (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| @ -14,8 +14,6 @@ | ||||
|  | ||||
| package compiler | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // Error represents compiler errors and their location in the document. | ||||
| type Error struct { | ||||
| 	Context *Context | ||||
| @ -27,19 +25,12 @@ func NewError(context *Context, message string) *Error { | ||||
| 	return &Error{Context: context, Message: message} | ||||
| } | ||||
|  | ||||
| func (err *Error) locationDescription() string { | ||||
| 	if err.Context.Node != nil { | ||||
| 		return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description()) | ||||
| 	} | ||||
| 	return err.Context.Description() | ||||
| } | ||||
|  | ||||
| // Error returns the string value of an Error. | ||||
| func (err *Error) Error() string { | ||||
| 	if err.Context == nil { | ||||
| 		return err.Message | ||||
| 		return "ERROR " + err.Message | ||||
| 	} | ||||
| 	return err.locationDescription() + " " + err.Message | ||||
| 	return "ERROR " + err.Context.Description() + " " + err.Message | ||||
| } | ||||
|  | ||||
| // ErrorGroup is a container for groups of Error values. | ||||
|  | ||||
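Correspondingly, a brief hedged sketch of the restored error formatting, which prefixes "ERROR " and the dotted context path instead of the removed line/column location; the context names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	ctx := compiler.NewContext("paths", compiler.NewContext("$root", nil))
	err := compiler.NewError(ctx, "unexpected value")
	// Prints: ERROR $root.paths unexpected value
	fmt.Println(err.Error())
}
```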
							
								
								
									
101  vendor/github.com/googleapis/gnostic/compiler/extension-handler.go  (generated, vendored, new file)
							| @ -0,0 +1,101 @@ | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"os/exec" | ||||
|  | ||||
| 	"strings" | ||||
|  | ||||
| 	"errors" | ||||
|  | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/golang/protobuf/ptypes/any" | ||||
| 	ext_plugin "github.com/googleapis/gnostic/extensions" | ||||
| 	yaml "gopkg.in/yaml.v2" | ||||
| ) | ||||
|  | ||||
| // ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. | ||||
| type ExtensionHandler struct { | ||||
| 	Name string | ||||
| } | ||||
|  | ||||
| // HandleExtension calls a binary extension handler. | ||||
| func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { | ||||
| 	handled := false | ||||
| 	var errFromPlugin error | ||||
| 	var outFromPlugin *any.Any | ||||
|  | ||||
| 	if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { | ||||
| 		for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { | ||||
| 			outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) | ||||
| 			if outFromPlugin == nil { | ||||
| 				continue | ||||
| 			} else { | ||||
| 				handled = true | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return handled, outFromPlugin, errFromPlugin | ||||
| } | ||||
|  | ||||
| func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { | ||||
| 	if extensionHandlers.Name != "" { | ||||
| 		binary, _ := yaml.Marshal(in) | ||||
|  | ||||
| 		request := &ext_plugin.ExtensionHandlerRequest{} | ||||
|  | ||||
| 		version := &ext_plugin.Version{} | ||||
| 		version.Major = 0 | ||||
| 		version.Minor = 1 | ||||
| 		version.Patch = 0 | ||||
| 		request.CompilerVersion = version | ||||
|  | ||||
| 		request.Wrapper = &ext_plugin.Wrapper{} | ||||
|  | ||||
| 		request.Wrapper.Version = "v2" | ||||
| 		request.Wrapper.Yaml = string(binary) | ||||
| 		request.Wrapper.ExtensionName = extensionName | ||||
|  | ||||
| 		requestBytes, _ := proto.Marshal(request) | ||||
| 		cmd := exec.Command(extensionHandlers.Name) | ||||
| 		cmd.Stdin = bytes.NewReader(requestBytes) | ||||
| 		output, err := cmd.Output() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			fmt.Printf("Error: %+v\n", err) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		response := &ext_plugin.ExtensionHandlerResponse{} | ||||
| 		err = proto.Unmarshal(output, response) | ||||
| 		if err != nil { | ||||
| 			fmt.Printf("Error: %+v\n", err) | ||||
| 			fmt.Printf("%s\n", string(output)) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		if !response.Handled { | ||||
| 			return nil, nil | ||||
| 		} | ||||
| 		if len(response.Error) != 0 { | ||||
| 			message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) | ||||
| 			return nil, errors.New(message) | ||||
| 		} | ||||
| 		return response.Value, nil | ||||
| 	} | ||||
| 	return nil, nil | ||||
| } | ||||
							
								
								
									
85  vendor/github.com/googleapis/gnostic/compiler/extensions.go  (generated, vendored)
							| @ -1,85 +0,0 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"os/exec" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/golang/protobuf/ptypes/any" | ||||
| 	extensions "github.com/googleapis/gnostic/extensions" | ||||
| 	yaml "gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| // ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. | ||||
| type ExtensionHandler struct { | ||||
| 	Name string | ||||
| } | ||||
|  | ||||
| // CallExtension calls a binary extension handler. | ||||
| func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { | ||||
| 	if context == nil || context.ExtensionHandlers == nil { | ||||
| 		return false, nil, nil | ||||
| 	} | ||||
| 	handled = false | ||||
| 	for _, handler := range *(context.ExtensionHandlers) { | ||||
| 		response, err = handler.handle(in, extensionName) | ||||
| 		if response == nil { | ||||
| 			continue | ||||
| 		} else { | ||||
| 			handled = true | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	return handled, response, err | ||||
| } | ||||
|  | ||||
| func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { | ||||
| 	if extensionHandlers.Name != "" { | ||||
| 		yamlData, _ := yaml.Marshal(in) | ||||
| 		request := &extensions.ExtensionHandlerRequest{ | ||||
| 			CompilerVersion: &extensions.Version{ | ||||
| 				Major: 0, | ||||
| 				Minor: 1, | ||||
| 				Patch: 0, | ||||
| 			}, | ||||
| 			Wrapper: &extensions.Wrapper{ | ||||
| 				Version:       "unknown", // TODO: set this to the type/version of spec being parsed. | ||||
| 				Yaml:          string(yamlData), | ||||
| 				ExtensionName: extensionName, | ||||
| 			}, | ||||
| 		} | ||||
| 		requestBytes, _ := proto.Marshal(request) | ||||
| 		cmd := exec.Command(extensionHandlers.Name) | ||||
| 		cmd.Stdin = bytes.NewReader(requestBytes) | ||||
| 		output, err := cmd.Output() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		response := &extensions.ExtensionHandlerResponse{} | ||||
| 		err = proto.Unmarshal(output, response) | ||||
| 		if err != nil || !response.Handled { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		if len(response.Errors) != 0 { | ||||
| 			return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) | ||||
| 		} | ||||
| 		return response.Value, nil | ||||
| 	} | ||||
| 	return nil, nil | ||||
| } | ||||
							
								
								
									
347  vendor/github.com/googleapis/gnostic/compiler/helpers.go  (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| @ -16,63 +16,56 @@ package compiler | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"gopkg.in/yaml.v2" | ||||
| 	"regexp" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"github.com/googleapis/gnostic/jsonschema" | ||||
| 	"gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| // compiler helper functions, usually called from generated code | ||||
|  | ||||
| // UnpackMap gets a *yaml.Node if possible. | ||||
| func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { | ||||
| 	if in == nil { | ||||
| 		return nil, false | ||||
| // UnpackMap gets a yaml.MapSlice if possible. | ||||
| func UnpackMap(in interface{}) (yaml.MapSlice, bool) { | ||||
| 	m, ok := in.(yaml.MapSlice) | ||||
| 	if ok { | ||||
| 		return m, true | ||||
| 	} | ||||
| 	return in, true | ||||
| 	// do we have an empty array? | ||||
| 	a, ok := in.([]interface{}) | ||||
| 	if ok && len(a) == 0 { | ||||
| 		// if so, return an empty map | ||||
| 		return yaml.MapSlice{}, true | ||||
| 	} | ||||
| 	return nil, false | ||||
| } | ||||
|  | ||||
| // SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. | ||||
| func SortedKeysForMap(m *yaml.Node) []string { | ||||
| // SortedKeysForMap returns the sorted keys of a yaml.MapSlice. | ||||
| func SortedKeysForMap(m yaml.MapSlice) []string { | ||||
| 	keys := make([]string, 0) | ||||
| 	if m.Kind == yaml.MappingNode { | ||||
| 		for i := 0; i < len(m.Content); i += 2 { | ||||
| 			keys = append(keys, m.Content[i].Value) | ||||
| 		} | ||||
| 	for _, item := range m { | ||||
| 		keys = append(keys, item.Key.(string)) | ||||
| 	} | ||||
| 	sort.Strings(keys) | ||||
| 	return keys | ||||
| } | ||||
|  | ||||
| // MapHasKey returns true if a yamlv2.MapSlice contains a specified key. | ||||
| func MapHasKey(m *yaml.Node, key string) bool { | ||||
| 	if m == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	if m.Kind == yaml.MappingNode { | ||||
| 		for i := 0; i < len(m.Content); i += 2 { | ||||
| 			itemKey := m.Content[i].Value | ||||
| 			if key == itemKey { | ||||
| 				return true | ||||
| 			} | ||||
| // MapHasKey returns true if a yaml.MapSlice contains a specified key. | ||||
| func MapHasKey(m yaml.MapSlice, key string) bool { | ||||
| 	for _, item := range m { | ||||
| 		itemKey, ok := item.Key.(string) | ||||
| 		if ok && key == itemKey { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // MapValueForKey gets the value of a map value for a specified key. | ||||
| func MapValueForKey(m *yaml.Node, key string) *yaml.Node { | ||||
| 	if m == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if m.Kind == yaml.MappingNode { | ||||
| 		for i := 0; i < len(m.Content); i += 2 { | ||||
| 			itemKey := m.Content[i].Value | ||||
| 			if key == itemKey { | ||||
| 				return m.Content[i+1] | ||||
| 			} | ||||
| func MapValueForKey(m yaml.MapSlice, key string) interface{} { | ||||
| 	for _, item := range m { | ||||
| 		itemKey, ok := item.Key.(string) | ||||
| 		if ok && key == itemKey { | ||||
| 			return item.Value | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| @ -90,118 +83,8 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { | ||||
| 	return stringArray | ||||
| } | ||||
|  | ||||
| // SequenceNodeForNode returns a node if it is a SequenceNode. | ||||
| func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { | ||||
| 	if node.Kind != yaml.SequenceNode { | ||||
| 		return nil, false | ||||
| 	} | ||||
| 	return node, true | ||||
| } | ||||
|  | ||||
| // BoolForScalarNode returns the bool value of a node. | ||||
| func BoolForScalarNode(node *yaml.Node) (bool, bool) { | ||||
| 	if node == nil { | ||||
| 		return false, false | ||||
| 	} | ||||
| 	if node.Kind == yaml.DocumentNode { | ||||
| 		return BoolForScalarNode(node.Content[0]) | ||||
| 	} | ||||
| 	if node.Kind != yaml.ScalarNode { | ||||
| 		return false, false | ||||
| 	} | ||||
| 	if node.Tag != "!!bool" { | ||||
| 		return false, false | ||||
| 	} | ||||
| 	v, err := strconv.ParseBool(node.Value) | ||||
| 	if err != nil { | ||||
| 		return false, false | ||||
| 	} | ||||
| 	return v, true | ||||
| } | ||||
|  | ||||
| // IntForScalarNode returns the integer value of a node. | ||||
| func IntForScalarNode(node *yaml.Node) (int64, bool) { | ||||
| 	if node == nil { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 	if node.Kind == yaml.DocumentNode { | ||||
| 		return IntForScalarNode(node.Content[0]) | ||||
| 	} | ||||
| 	if node.Kind != yaml.ScalarNode { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 	if node.Tag != "!!int" { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 	v, err := strconv.ParseInt(node.Value, 10, 64) | ||||
| 	if err != nil { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 	return v, true | ||||
| } | ||||
|  | ||||
| // FloatForScalarNode returns the float value of a node. | ||||
| func FloatForScalarNode(node *yaml.Node) (float64, bool) { | ||||
| 	if node == nil { | ||||
| 		return 0.0, false | ||||
| 	} | ||||
| 	if node.Kind == yaml.DocumentNode { | ||||
| 		return FloatForScalarNode(node.Content[0]) | ||||
| 	} | ||||
| 	if node.Kind != yaml.ScalarNode { | ||||
| 		return 0.0, false | ||||
| 	} | ||||
| 	if (node.Tag != "!!int") && (node.Tag != "!!float") { | ||||
| 		return 0.0, false | ||||
| 	} | ||||
| 	v, err := strconv.ParseFloat(node.Value, 64) | ||||
| 	if err != nil { | ||||
| 		return 0.0, false | ||||
| 	} | ||||
| 	return v, true | ||||
| } | ||||
|  | ||||
| // StringForScalarNode returns the string value of a node. | ||||
| func StringForScalarNode(node *yaml.Node) (string, bool) { | ||||
| 	if node == nil { | ||||
| 		return "", false | ||||
| 	} | ||||
| 	if node.Kind == yaml.DocumentNode { | ||||
| 		return StringForScalarNode(node.Content[0]) | ||||
| 	} | ||||
| 	switch node.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		switch node.Tag { | ||||
| 		case "!!int": | ||||
| 			return node.Value, true | ||||
| 		case "!!str": | ||||
| 			return node.Value, true | ||||
| 		case "!!timestamp": | ||||
| 			return node.Value, true | ||||
| 		case "!!null": | ||||
| 			return "", true | ||||
| 		default: | ||||
| 			return "", false | ||||
| 		} | ||||
| 	default: | ||||
| 		return "", false | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. | ||||
| func StringArrayForSequenceNode(node *yaml.Node) []string { | ||||
| 	stringArray := make([]string, 0) | ||||
| 	for _, item := range node.Content { | ||||
| 		v, ok := StringForScalarNode(item) | ||||
| 		if ok { | ||||
| 			stringArray = append(stringArray, v) | ||||
| 		} | ||||
| 	} | ||||
| 	return stringArray | ||||
| } | ||||
|  | ||||
| // MissingKeysInMap identifies which keys from a list of required keys are not in a map. | ||||
| func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { | ||||
| func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { | ||||
| 	missingKeys := make([]string, 0) | ||||
| 	for _, k := range requiredKeys { | ||||
| 		if !MapHasKey(m, k) { | ||||
| @ -212,109 +95,64 @@ func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { | ||||
| } | ||||
|  | ||||
| // InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. | ||||
| func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { | ||||
| func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { | ||||
| 	invalidKeys := make([]string, 0) | ||||
| 	if m == nil || m.Kind != yaml.MappingNode { | ||||
| 		return invalidKeys | ||||
| 	} | ||||
| 	for i := 0; i < len(m.Content); i += 2 { | ||||
| 		key := m.Content[i].Value | ||||
| 		found := false | ||||
| 		// does the key match an allowed key? | ||||
| 		for _, allowedKey := range allowedKeys { | ||||
| 			if key == allowedKey { | ||||
| 				found = true | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 		if !found { | ||||
| 			// does the key match an allowed pattern? | ||||
| 			for _, allowedPattern := range allowedPatterns { | ||||
| 				if allowedPattern.MatchString(key) { | ||||
| 	for _, item := range m { | ||||
| 		itemKey, ok := item.Key.(string) | ||||
| 		if ok { | ||||
| 			key := itemKey | ||||
| 			found := false | ||||
| 			// does the key match an allowed key? | ||||
| 			for _, allowedKey := range allowedKeys { | ||||
| 				if key == allowedKey { | ||||
| 					found = true | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 			if !found { | ||||
| 				invalidKeys = append(invalidKeys, key) | ||||
| 				// does the key match an allowed pattern? | ||||
| 				for _, allowedPattern := range allowedPatterns { | ||||
| 					if allowedPattern.MatchString(key) { | ||||
| 						found = true | ||||
| 						break | ||||
| 					} | ||||
| 				} | ||||
| 				if !found { | ||||
| 					invalidKeys = append(invalidKeys, key) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return invalidKeys | ||||
| } | ||||
|  | ||||
| // NewNullNode creates a new Null node. | ||||
| func NewNullNode() *yaml.Node { | ||||
| 	node := &yaml.Node{ | ||||
| 		Kind: yaml.ScalarNode, | ||||
| 		Tag:  "!!null", | ||||
| // DescribeMap describes a map (for debugging purposes). | ||||
| func DescribeMap(in interface{}, indent string) string { | ||||
| 	description := "" | ||||
| 	m, ok := in.(map[string]interface{}) | ||||
| 	if ok { | ||||
| 		keys := make([]string, 0) | ||||
| 		for k := range m { | ||||
| 			keys = append(keys, k) | ||||
| 		} | ||||
| 		sort.Strings(keys) | ||||
| 		for _, k := range keys { | ||||
| 			v := m[k] | ||||
| 			description += fmt.Sprintf("%s%s:\n", indent, k) | ||||
| 			description += DescribeMap(v, indent+"  ") | ||||
| 		} | ||||
| 		return description | ||||
| 	} | ||||
| 	return node | ||||
| } | ||||
|  | ||||
| // NewMappingNode creates a new Mapping node. | ||||
| func NewMappingNode() *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:    yaml.MappingNode, | ||||
| 		Content: make([]*yaml.Node, 0), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewSequenceNode creates a new Sequence node. | ||||
| func NewSequenceNode() *yaml.Node { | ||||
| 	node := &yaml.Node{ | ||||
| 		Kind:    yaml.SequenceNode, | ||||
| 		Content: make([]*yaml.Node, 0), | ||||
| 	} | ||||
| 	return node | ||||
| } | ||||
|  | ||||
| // NewScalarNodeForString creates a new node to hold a string. | ||||
| func NewScalarNodeForString(s string) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!str", | ||||
| 		Value: s, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewSequenceNodeForStringArray creates a new node to hold an array of strings. | ||||
| func NewSequenceNodeForStringArray(strings []string) *yaml.Node { | ||||
| 	node := &yaml.Node{ | ||||
| 		Kind:    yaml.SequenceNode, | ||||
| 		Content: make([]*yaml.Node, 0), | ||||
| 	} | ||||
| 	for _, s := range strings { | ||||
| 		node.Content = append(node.Content, NewScalarNodeForString(s)) | ||||
| 	} | ||||
| 	return node | ||||
| } | ||||
|  | ||||
| // NewScalarNodeForBool creates a new node to hold a bool. | ||||
| func NewScalarNodeForBool(b bool) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!bool", | ||||
| 		Value: fmt.Sprintf("%t", b), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewScalarNodeForFloat creates a new node to hold a float. | ||||
| func NewScalarNodeForFloat(f float64) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!float", | ||||
| 		Value: fmt.Sprintf("%g", f), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewScalarNodeForInt creates a new node to hold an integer. | ||||
| func NewScalarNodeForInt(i int64) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!int", | ||||
| 		Value: fmt.Sprintf("%d", i), | ||||
| 	a, ok := in.([]interface{}) | ||||
| 	if ok { | ||||
| 		for i, v := range a { | ||||
| 			description += fmt.Sprintf("%s%d:\n", indent, i) | ||||
| 			description += DescribeMap(v, indent+"  ") | ||||
| 		} | ||||
| 		return description | ||||
| 	} | ||||
| 	description += fmt.Sprintf("%s%+v\n", indent, in) | ||||
| 	return description | ||||
| } | ||||
|  | ||||
| // PluralProperties returns the string "properties" pluralized. | ||||
| @ -357,40 +195,3 @@ func StringValue(item interface{}) (value string, ok bool) { | ||||
| 	} | ||||
| 	return "", false | ||||
| } | ||||
|  | ||||
| // Description returns a human-readable representation of an item. | ||||
| func Description(item interface{}) string { | ||||
| 	value, ok := item.(*yaml.Node) | ||||
| 	if ok { | ||||
| 		return jsonschema.Render(value) | ||||
| 	} | ||||
| 	return fmt.Sprintf("%+v", item) | ||||
| } | ||||
|  | ||||
| // Display returns a description of a node for use in error messages. | ||||
| func Display(node *yaml.Node) string { | ||||
| 	switch node.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		switch node.Tag { | ||||
| 		case "!!str": | ||||
| 			return fmt.Sprintf("%s (string)", node.Value) | ||||
| 		} | ||||
| 	} | ||||
| 	return fmt.Sprintf("%+v (%T)", node, node) | ||||
| } | ||||
|  | ||||
| // Marshal creates a yaml version of a structure in our preferred style | ||||
| func Marshal(in *yaml.Node) []byte { | ||||
| 	clearStyle(in) | ||||
| 	//bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) | ||||
| 	bytes, _ := yaml.Marshal(in) | ||||
|  | ||||
| 	return bytes | ||||
| } | ||||
|  | ||||
| func clearStyle(node *yaml.Node) { | ||||
| 	node.Style = 0 | ||||
| 	for _, c := range node.Content { | ||||
| 		clearStyle(c) | ||||
| 	} | ||||
| } | ||||
|  | ||||
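A minimal sketch of the yaml.MapSlice-based helpers restored above, assuming gopkg.in/yaml.v2; the sample document is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Parse a tiny OpenAPI v2 fragment into an ordered yaml.MapSlice.
	var m yaml.MapSlice
	doc := []byte("swagger: \"2.0\"\ninfo:\n  title: Example\npaths: {}\n")
	if err := yaml.Unmarshal(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(compiler.SortedKeysForMap(m))          // [info paths swagger]
	fmt.Println(compiler.MapHasKey(m, "swagger"))      // true
	fmt.Println(compiler.MapValueForKey(m, "swagger")) // 2.0
	// Report required keys that are absent from the map.
	fmt.Println(compiler.MissingKeysInMap(m, []string{"swagger", "host"})) // [host]
}
```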
							
								
								
									
2  vendor/github.com/googleapis/gnostic/compiler/main.go  (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
|  | ||||
							
								
								
									
224  vendor/github.com/googleapis/gnostic/compiler/reader.go  (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| @ -15,36 +15,22 @@ | ||||
| package compiler | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"gopkg.in/yaml.v2" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	yaml "gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| var verboseReader = false | ||||
|  | ||||
| var fileCache map[string][]byte | ||||
| var infoCache map[string]*yaml.Node | ||||
| var infoCache map[string]interface{} | ||||
| var count int64 | ||||
|  | ||||
| var fileCacheEnable = true | ||||
| var infoCacheEnable = true | ||||
|  | ||||
| // These locks are used to synchronize accesses to the fileCache and infoCache | ||||
| // maps (above). They are global state and can throw thread-related errors | ||||
| // when modified from separate goroutines. The general strategy is to protect | ||||
| // all public functions in this file with mutex Lock() calls. As a result, to | ||||
| // avoid deadlock, these public functions should not call other public | ||||
| // functions, so some public functions have private equivalents. | ||||
| // In the future, we might consider replacing the maps with sync.Map and | ||||
| // eliminating these mutexes. | ||||
| var fileCacheMutex sync.Mutex | ||||
| var infoCacheMutex sync.Mutex | ||||
| var verboseReader = false | ||||
|  | ||||
| func initializeFileCache() { | ||||
| 	if fileCache == nil { | ||||
| @ -54,122 +40,33 @@ func initializeFileCache() { | ||||
|  | ||||
| func initializeInfoCache() { | ||||
| 	if infoCache == nil { | ||||
| 		infoCache = make(map[string]*yaml.Node, 0) | ||||
| 		infoCache = make(map[string]interface{}, 0) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // EnableFileCache turns on file caching. | ||||
| func EnableFileCache() { | ||||
| 	fileCacheMutex.Lock() | ||||
| 	defer fileCacheMutex.Unlock() | ||||
| 	fileCacheEnable = true | ||||
| } | ||||
|  | ||||
| // EnableInfoCache turns on parsed info caching. | ||||
| func EnableInfoCache() { | ||||
| 	infoCacheMutex.Lock() | ||||
| 	defer infoCacheMutex.Unlock() | ||||
| 	infoCacheEnable = true | ||||
| } | ||||
|  | ||||
| // DisableFileCache turns off file caching. | ||||
| func DisableFileCache() { | ||||
| 	fileCacheMutex.Lock() | ||||
| 	defer fileCacheMutex.Unlock() | ||||
| 	fileCacheEnable = false | ||||
| } | ||||
|  | ||||
| // DisableInfoCache turns off parsed info caching. | ||||
| func DisableInfoCache() { | ||||
| 	infoCacheMutex.Lock() | ||||
| 	defer infoCacheMutex.Unlock() | ||||
| 	infoCacheEnable = false | ||||
| } | ||||
|  | ||||
| // RemoveFromFileCache removes an entry from the file cache. | ||||
| func RemoveFromFileCache(fileurl string) { | ||||
| 	fileCacheMutex.Lock() | ||||
| 	defer fileCacheMutex.Unlock() | ||||
| 	if !fileCacheEnable { | ||||
| 		return | ||||
| 	} | ||||
| 	initializeFileCache() | ||||
| 	delete(fileCache, fileurl) | ||||
| } | ||||
|  | ||||
| // RemoveFromInfoCache removes an entry from the info cache. | ||||
| func RemoveFromInfoCache(filename string) { | ||||
| 	infoCacheMutex.Lock() | ||||
| 	defer infoCacheMutex.Unlock() | ||||
| 	if !infoCacheEnable { | ||||
| 		return | ||||
| 	} | ||||
| 	initializeInfoCache() | ||||
| 	delete(infoCache, filename) | ||||
| } | ||||
|  | ||||
| // GetInfoCache returns the info cache map. | ||||
| func GetInfoCache() map[string]*yaml.Node { | ||||
| 	infoCacheMutex.Lock() | ||||
| 	defer infoCacheMutex.Unlock() | ||||
| 	if infoCache == nil { | ||||
| 		initializeInfoCache() | ||||
| 	} | ||||
| 	return infoCache | ||||
| } | ||||
|  | ||||
| // ClearFileCache clears the file cache. | ||||
| func ClearFileCache() { | ||||
| 	fileCacheMutex.Lock() | ||||
| 	defer fileCacheMutex.Unlock() | ||||
| 	fileCache = make(map[string][]byte, 0) | ||||
| } | ||||
|  | ||||
| // ClearInfoCache clears the info cache. | ||||
| func ClearInfoCache() { | ||||
| 	infoCacheMutex.Lock() | ||||
| 	defer infoCacheMutex.Unlock() | ||||
| 	infoCache = make(map[string]*yaml.Node) | ||||
| } | ||||
|  | ||||
| // ClearCaches clears all caches. | ||||
| func ClearCaches() { | ||||
| 	ClearFileCache() | ||||
| 	ClearInfoCache() | ||||
| } | ||||
|  | ||||
| // FetchFile gets a specified file from the local filesystem or a remote location. | ||||
| func FetchFile(fileurl string) ([]byte, error) { | ||||
| 	fileCacheMutex.Lock() | ||||
| 	defer fileCacheMutex.Unlock() | ||||
| 	return fetchFile(fileurl) | ||||
| } | ||||
|  | ||||
| func fetchFile(fileurl string) ([]byte, error) { | ||||
| 	var bytes []byte | ||||
| 	initializeFileCache() | ||||
| 	if fileCacheEnable { | ||||
| 		bytes, ok := fileCache[fileurl] | ||||
| 		if ok { | ||||
| 			if verboseReader { | ||||
| 				log.Printf("Cache hit %s", fileurl) | ||||
| 			} | ||||
| 			return bytes, nil | ||||
| 		} | ||||
| 	bytes, ok := fileCache[fileurl] | ||||
| 	if ok { | ||||
| 		if verboseReader { | ||||
| 			log.Printf("Fetching %s", fileurl) | ||||
| 			log.Printf("Cache hit %s", fileurl) | ||||
| 		} | ||||
| 		return bytes, nil | ||||
| 	} | ||||
| 	if verboseReader { | ||||
| 		log.Printf("Fetching %s", fileurl) | ||||
| 	} | ||||
| 	response, err := http.Get(fileurl) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer response.Body.Close() | ||||
| 	if response.StatusCode != 200 { | ||||
| 		return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) | ||||
| 		return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) | ||||
| 	} | ||||
| 	defer response.Body.Close() | ||||
| 	bytes, err = ioutil.ReadAll(response.Body) | ||||
| 	if fileCacheEnable && err == nil { | ||||
| 	if err == nil { | ||||
| 		fileCache[fileurl] = bytes | ||||
| 	} | ||||
| 	return bytes, err | ||||
| @ -177,17 +74,11 @@ func fetchFile(fileurl string) ([]byte, error) { | ||||
|  | ||||
| // ReadBytesForFile reads the bytes of a file. | ||||
| func ReadBytesForFile(filename string) ([]byte, error) { | ||||
| 	fileCacheMutex.Lock() | ||||
| 	defer fileCacheMutex.Unlock() | ||||
| 	return readBytesForFile(filename) | ||||
| } | ||||
|  | ||||
| func readBytesForFile(filename string) ([]byte, error) { | ||||
| 	// is the filename a url? | ||||
| 	fileurl, _ := url.Parse(filename) | ||||
| 	if fileurl.Scheme != "" { | ||||
| 		// yes, fetch it | ||||
| 		bytes, err := fetchFile(filename) | ||||
| 		bytes, err := FetchFile(filename) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| @ -201,46 +92,32 @@ func readBytesForFile(filename string) ([]byte, error) { | ||||
| 	return bytes, nil | ||||
| } | ||||
|  | ||||
| // ReadInfoFromBytes unmarshals a file as a *yaml.Node. | ||||
| func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { | ||||
| 	infoCacheMutex.Lock() | ||||
| 	defer infoCacheMutex.Unlock() | ||||
| 	return readInfoFromBytes(filename, bytes) | ||||
| } | ||||
|  | ||||
| func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { | ||||
| // ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. | ||||
| func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { | ||||
| 	initializeInfoCache() | ||||
| 	if infoCacheEnable { | ||||
| 		cachedInfo, ok := infoCache[filename] | ||||
| 		if ok { | ||||
| 			if verboseReader { | ||||
| 				log.Printf("Cache hit info for file %s", filename) | ||||
| 			} | ||||
| 			return cachedInfo, nil | ||||
| 		} | ||||
| 	cachedInfo, ok := infoCache[filename] | ||||
| 	if ok { | ||||
| 		if verboseReader { | ||||
| 			log.Printf("Reading info for file %s", filename) | ||||
| 			log.Printf("Cache hit info for file %s", filename) | ||||
| 		} | ||||
| 		return cachedInfo, nil | ||||
| 	} | ||||
| 	var info yaml.Node | ||||
| 	if verboseReader { | ||||
| 		log.Printf("Reading info for file %s", filename) | ||||
| 	} | ||||
| 	var info yaml.MapSlice | ||||
| 	err := yaml.Unmarshal(bytes, &info) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if infoCacheEnable && len(filename) > 0 { | ||||
| 		infoCache[filename] = &info | ||||
| 	} | ||||
| 	return &info, nil | ||||
| 	infoCache[filename] = info | ||||
| 	return info, nil | ||||
| } | ||||
|  | ||||
| // ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. | ||||
| func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { | ||||
| 	fileCacheMutex.Lock() | ||||
| 	defer fileCacheMutex.Unlock() | ||||
| 	infoCacheMutex.Lock() | ||||
| 	defer infoCacheMutex.Unlock() | ||||
| func ReadInfoForRef(basefile string, ref string) (interface{}, error) { | ||||
| 	initializeInfoCache() | ||||
| 	if infoCacheEnable { | ||||
| 	{ | ||||
| 		info, ok := infoCache[ref] | ||||
| 		if ok { | ||||
| 			if verboseReader { | ||||
| @ -248,46 +125,37 @@ func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { | ||||
| 			} | ||||
| 			return info, nil | ||||
| 		} | ||||
| 		if verboseReader { | ||||
| 			log.Printf("Reading info for ref %s#%s", basefile, ref) | ||||
| 		} | ||||
| 	} | ||||
| 	if verboseReader { | ||||
| 		log.Printf("Reading info for ref %s#%s", basefile, ref) | ||||
| 	} | ||||
| 	count = count + 1 | ||||
| 	basedir, _ := filepath.Split(basefile) | ||||
| 	parts := strings.Split(ref, "#") | ||||
| 	var filename string | ||||
| 	if parts[0] != "" { | ||||
| 		filename = parts[0] | ||||
| 		if _, err := url.ParseRequestURI(parts[0]); err != nil { | ||||
| 			// It is not an URL, so the file is local | ||||
| 			filename = basedir + parts[0] | ||||
| 		} | ||||
| 		filename = basedir + parts[0] | ||||
| 	} else { | ||||
| 		filename = basefile | ||||
| 	} | ||||
| 	bytes, err := readBytesForFile(filename) | ||||
| 	bytes, err := ReadBytesForFile(filename) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	info, err := readInfoFromBytes(filename, bytes) | ||||
| 	if info != nil && info.Kind == yaml.DocumentNode { | ||||
| 		info = info.Content[0] | ||||
| 	} | ||||
| 	info, err := ReadInfoFromBytes(filename, bytes) | ||||
| 	if err != nil { | ||||
| 		log.Printf("File error: %v\n", err) | ||||
| 	} else { | ||||
| 		if info == nil { | ||||
| 			return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) | ||||
| 		} | ||||
| 		if len(parts) > 1 { | ||||
| 			path := strings.Split(parts[1], "/") | ||||
| 			for i, key := range path { | ||||
| 				if i > 0 { | ||||
| 					m := info | ||||
| 					if true { | ||||
| 					m, ok := info.(yaml.MapSlice) | ||||
| 					if ok { | ||||
| 						found := false | ||||
| 						for i := 0; i < len(m.Content); i += 2 { | ||||
| 							if m.Content[i].Value == key { | ||||
| 								info = m.Content[i+1] | ||||
| 						for _, section := range m { | ||||
| 							if section.Key == key { | ||||
| 								info = section.Value | ||||
| 								found = true | ||||
| 							} | ||||
| 						} | ||||
| @ -300,8 +168,6 @@ func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if infoCacheEnable { | ||||
| 		infoCache[ref] = info | ||||
| 	} | ||||
| 	infoCache[ref] = info | ||||
| 	return info, nil | ||||
| } | ||||
|  | ||||
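A hedged sketch of resolving a $ref with the downgraded reader above; the base file and fragment are placeholders, not files from this repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Resolve the fragment "#/definitions/Pet" against a local swagger.yaml,
	// caching both the fetched bytes and the parsed info along the way.
	info, err := compiler.ReadInfoForRef("swagger.yaml", "#/definitions/Pet")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", info)
}
```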
							
								
								
									
5  vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh  (generated, vendored, new file)
							| @ -0,0 +1,5 @@ | ||||
| go get github.com/golang/protobuf/protoc-gen-go | ||||
|  | ||||
| protoc \ | ||||
| --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto  | ||||
|  | ||||
							
								
								
									
12  vendor/github.com/googleapis/gnostic/extensions/README.md  (generated, vendored)
							| @ -1,13 +1,5 @@ | ||||
| # Extensions | ||||
|  | ||||
| **Extension Support is experimental.** | ||||
| This directory contains support code for building Gnostic extensions and associated examples. | ||||
|  | ||||
| This directory contains support code for building Gnostic extension handlers and | ||||
| associated examples. | ||||
|  | ||||
| Extension handlers can be used to compile vendor or specification extensions | ||||
| into protocol buffer structures. | ||||
|  | ||||
| Like plugins, extension handlers are built as separate executables. Extension | ||||
| bodies are written to extension handlers as serialized | ||||
| ExtensionHandlerRequests. | ||||
| Extensions are used to compile vendor or specification extensions into protocol buffer structures. | ||||
|  | ||||
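The lines removed above describe extension handlers as separate executables that receive a serialized ExtensionHandlerRequest on stdin and reply on stdout. A minimal, hypothetical handler skeleton under that protocol, which simply reports the extension as unhandled; import paths follow the vendored files in this diff:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	ext "github.com/googleapis/gnostic/extensions"
)

func main() {
	// Read the serialized ExtensionHandlerRequest written by the compiler.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	request := &ext.ExtensionHandlerRequest{}
	if err := proto.Unmarshal(data, request); err != nil {
		log.Fatal(err)
	}
	// Reply that this handler does not process the extension.
	response := &ext.ExtensionHandlerResponse{Handled: false}
	out, err := proto.Marshal(response)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}
```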
							
								
								
									
506  vendor/github.com/googleapis/gnostic/extensions/extension.pb.go  (generated, vendored)
							| @ -1,186 +1,113 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
| // Code generated by protoc-gen-go. | ||||
| // source: extension.proto | ||||
| // DO NOT EDIT! | ||||
|  | ||||
| // Code generated by protoc-gen-go. DO NOT EDIT. | ||||
| // versions: | ||||
| // 	protoc-gen-go v1.24.0 | ||||
| // 	protoc        v3.12.0 | ||||
| // source: extensions/extension.proto | ||||
| /* | ||||
| Package openapiextension_v1 is a generated protocol buffer package. | ||||
|  | ||||
| package gnostic_extension_v1 | ||||
| It is generated from these files: | ||||
| 	extension.proto | ||||
|  | ||||
| import ( | ||||
| 	proto "github.com/golang/protobuf/proto" | ||||
| 	any "github.com/golang/protobuf/ptypes/any" | ||||
| 	protoreflect "google.golang.org/protobuf/reflect/protoreflect" | ||||
| 	protoimpl "google.golang.org/protobuf/runtime/protoimpl" | ||||
| 	reflect "reflect" | ||||
| 	sync "sync" | ||||
| ) | ||||
| It has these top-level messages: | ||||
| 	Version | ||||
| 	ExtensionHandlerRequest | ||||
| 	ExtensionHandlerResponse | ||||
| 	Wrapper | ||||
| */ | ||||
| package openapiextension_v1 | ||||
|  | ||||
| const ( | ||||
| 	// Verify that this generated code is sufficiently up-to-date. | ||||
| 	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) | ||||
| 	// Verify that runtime/protoimpl is sufficiently up-to-date. | ||||
| 	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) | ||||
| ) | ||||
| import proto "github.com/golang/protobuf/proto" | ||||
| import fmt "fmt" | ||||
| import math "math" | ||||
| import google_protobuf "github.com/golang/protobuf/ptypes/any" | ||||
|  | ||||
| // This is a compile-time assertion that a sufficiently up-to-date version | ||||
| // of the legacy proto package is being used. | ||||
| const _ = proto.ProtoPackageIsVersion4 | ||||
| // Reference imports to suppress errors if they are not otherwise used. | ||||
| var _ = proto.Marshal | ||||
| var _ = fmt.Errorf | ||||
| var _ = math.Inf | ||||
|  | ||||
| // The version number of Gnostic. | ||||
| // This is a compile-time assertion to ensure that this generated file | ||||
| // is compatible with the proto package it is being compiled against. | ||||
| // A compilation error at this line likely means your copy of the | ||||
| // proto package needs to be updated. | ||||
| const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | ||||
|  | ||||
| // The version number of OpenAPI compiler. | ||||
| type Version struct { | ||||
| 	state         protoimpl.MessageState | ||||
| 	sizeCache     protoimpl.SizeCache | ||||
| 	unknownFields protoimpl.UnknownFields | ||||
|  | ||||
| 	Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` | ||||
| 	Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` | ||||
| 	Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` | ||||
| 	Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` | ||||
| 	Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` | ||||
| 	Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` | ||||
| 	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should | ||||
| 	// be empty for mainline stable releases. | ||||
| 	Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` | ||||
| 	Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` | ||||
| } | ||||
|  | ||||
| func (x *Version) Reset() { | ||||
| 	*x = Version{} | ||||
| 	if protoimpl.UnsafeEnabled { | ||||
| 		mi := &file_extensions_extension_proto_msgTypes[0] | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		ms.StoreMessageInfo(mi) | ||||
| 	} | ||||
| } | ||||
| func (m *Version) Reset()                    { *m = Version{} } | ||||
| func (m *Version) String() string            { return proto.CompactTextString(m) } | ||||
| func (*Version) ProtoMessage()               {} | ||||
| func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | ||||
|  | ||||
| func (x *Version) String() string { | ||||
| 	return protoimpl.X.MessageStringOf(x) | ||||
| } | ||||
|  | ||||
| func (*Version) ProtoMessage() {} | ||||
|  | ||||
| func (x *Version) ProtoReflect() protoreflect.Message { | ||||
| 	mi := &file_extensions_extension_proto_msgTypes[0] | ||||
| 	if protoimpl.UnsafeEnabled && x != nil { | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		if ms.LoadMessageInfo() == nil { | ||||
| 			ms.StoreMessageInfo(mi) | ||||
| 		} | ||||
| 		return ms | ||||
| 	} | ||||
| 	return mi.MessageOf(x) | ||||
| } | ||||
|  | ||||
| // Deprecated: Use Version.ProtoReflect.Descriptor instead. | ||||
| func (*Version) Descriptor() ([]byte, []int) { | ||||
| 	return file_extensions_extension_proto_rawDescGZIP(), []int{0} | ||||
| } | ||||
|  | ||||
| func (x *Version) GetMajor() int32 { | ||||
| 	if x != nil { | ||||
| 		return x.Major | ||||
| func (m *Version) GetMajor() int32 { | ||||
| 	if m != nil { | ||||
| 		return m.Major | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (x *Version) GetMinor() int32 { | ||||
| 	if x != nil { | ||||
| 		return x.Minor | ||||
| func (m *Version) GetMinor() int32 { | ||||
| 	if m != nil { | ||||
| 		return m.Minor | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (x *Version) GetPatch() int32 { | ||||
| 	if x != nil { | ||||
| 		return x.Patch | ||||
| func (m *Version) GetPatch() int32 { | ||||
| 	if m != nil { | ||||
| 		return m.Patch | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (x *Version) GetSuffix() string { | ||||
| 	if x != nil { | ||||
| 		return x.Suffix | ||||
| func (m *Version) GetSuffix() string { | ||||
| 	if m != nil { | ||||
| 		return m.Suffix | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // An encoded Request is written to the ExtensionHandler's stdin. | ||||
| type ExtensionHandlerRequest struct { | ||||
| 	state         protoimpl.MessageState | ||||
| 	sizeCache     protoimpl.SizeCache | ||||
| 	unknownFields protoimpl.UnknownFields | ||||
|  | ||||
| 	// The extension to process. | ||||
| 	Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` | ||||
| 	// The version number of Gnostic. | ||||
| 	CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` | ||||
| 	// The OpenAPI descriptions that were explicitly listed on the command line. | ||||
| 	// The specifications will appear in the order they are specified to openapic. | ||||
| 	Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` | ||||
| 	// The version number of openapi compiler. | ||||
| 	CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` | ||||
| } | ||||
|  | ||||
| func (x *ExtensionHandlerRequest) Reset() { | ||||
| 	*x = ExtensionHandlerRequest{} | ||||
| 	if protoimpl.UnsafeEnabled { | ||||
| 		mi := &file_extensions_extension_proto_msgTypes[1] | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		ms.StoreMessageInfo(mi) | ||||
| 	} | ||||
| } | ||||
| func (m *ExtensionHandlerRequest) Reset()                    { *m = ExtensionHandlerRequest{} } | ||||
| func (m *ExtensionHandlerRequest) String() string            { return proto.CompactTextString(m) } | ||||
| func (*ExtensionHandlerRequest) ProtoMessage()               {} | ||||
| func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } | ||||
|  | ||||
| func (x *ExtensionHandlerRequest) String() string { | ||||
| 	return protoimpl.X.MessageStringOf(x) | ||||
| } | ||||
|  | ||||
| func (*ExtensionHandlerRequest) ProtoMessage() {} | ||||
|  | ||||
| func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { | ||||
| 	mi := &file_extensions_extension_proto_msgTypes[1] | ||||
| 	if protoimpl.UnsafeEnabled && x != nil { | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		if ms.LoadMessageInfo() == nil { | ||||
| 			ms.StoreMessageInfo(mi) | ||||
| 		} | ||||
| 		return ms | ||||
| 	} | ||||
| 	return mi.MessageOf(x) | ||||
| } | ||||
|  | ||||
| // Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. | ||||
| func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { | ||||
| 	return file_extensions_extension_proto_rawDescGZIP(), []int{1} | ||||
| } | ||||
|  | ||||
| func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { | ||||
| 	if x != nil { | ||||
| 		return x.Wrapper | ||||
| func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { | ||||
| 	if m != nil { | ||||
| 		return m.Wrapper | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { | ||||
| 	if x != nil { | ||||
| 		return x.CompilerVersion | ||||
| func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { | ||||
| 	if m != nil { | ||||
| 		return m.CompilerVersion | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // The extensions writes an encoded ExtensionHandlerResponse to stdout. | ||||
| type ExtensionHandlerResponse struct { | ||||
| 	state         protoimpl.MessageState | ||||
| 	sizeCache     protoimpl.SizeCache | ||||
| 	unknownFields protoimpl.UnknownFields | ||||
|  | ||||
| 	// true if the extension is handled by the extension handler; false otherwise | ||||
| 	Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` | ||||
| 	// Error message(s).  If non-empty, the extension handling failed. | ||||
| 	Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` | ||||
| 	// Error message.  If non-empty, the extension handling failed. | ||||
| 	// The extension handler process should exit with status code zero | ||||
| 	// even if it reports an error in this way. | ||||
| 	// | ||||
| @ -189,277 +116,104 @@ type ExtensionHandlerResponse struct { | ||||
| 	// itself -- such as the input Document being unparseable -- should be | ||||
| 	// reported by writing a message to stderr and exiting with a non-zero | ||||
| 	// status code. | ||||
| 	Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` | ||||
| 	Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` | ||||
| 	// text output | ||||
| 	Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` | ||||
| 	Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` | ||||
| } | ||||
|  | ||||
| func (x *ExtensionHandlerResponse) Reset() { | ||||
| 	*x = ExtensionHandlerResponse{} | ||||
| 	if protoimpl.UnsafeEnabled { | ||||
| 		mi := &file_extensions_extension_proto_msgTypes[2] | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		ms.StoreMessageInfo(mi) | ||||
| 	} | ||||
| } | ||||
| func (m *ExtensionHandlerResponse) Reset()                    { *m = ExtensionHandlerResponse{} } | ||||
| func (m *ExtensionHandlerResponse) String() string            { return proto.CompactTextString(m) } | ||||
| func (*ExtensionHandlerResponse) ProtoMessage()               {} | ||||
| func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } | ||||
|  | ||||
| func (x *ExtensionHandlerResponse) String() string { | ||||
| 	return protoimpl.X.MessageStringOf(x) | ||||
| } | ||||
|  | ||||
| func (*ExtensionHandlerResponse) ProtoMessage() {} | ||||
|  | ||||
| func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { | ||||
| 	mi := &file_extensions_extension_proto_msgTypes[2] | ||||
| 	if protoimpl.UnsafeEnabled && x != nil { | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		if ms.LoadMessageInfo() == nil { | ||||
| 			ms.StoreMessageInfo(mi) | ||||
| 		} | ||||
| 		return ms | ||||
| 	} | ||||
| 	return mi.MessageOf(x) | ||||
| } | ||||
|  | ||||
| // Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. | ||||
| func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { | ||||
| 	return file_extensions_extension_proto_rawDescGZIP(), []int{2} | ||||
| } | ||||
|  | ||||
| func (x *ExtensionHandlerResponse) GetHandled() bool { | ||||
| 	if x != nil { | ||||
| 		return x.Handled | ||||
| func (m *ExtensionHandlerResponse) GetHandled() bool { | ||||
| 	if m != nil { | ||||
| 		return m.Handled | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (x *ExtensionHandlerResponse) GetErrors() []string { | ||||
| 	if x != nil { | ||||
| 		return x.Errors | ||||
| func (m *ExtensionHandlerResponse) GetError() []string { | ||||
| 	if m != nil { | ||||
| 		return m.Error | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (x *ExtensionHandlerResponse) GetValue() *any.Any { | ||||
| 	if x != nil { | ||||
| 		return x.Value | ||||
| func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { | ||||
| 	if m != nil { | ||||
| 		return m.Value | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type Wrapper struct { | ||||
| 	state         protoimpl.MessageState | ||||
| 	sizeCache     protoimpl.SizeCache | ||||
| 	unknownFields protoimpl.UnknownFields | ||||
|  | ||||
| 	// version of the OpenAPI specification in which this extension was written. | ||||
| 	Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` | ||||
| 	// Name of the extension. | ||||
| 	ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` | ||||
| 	// YAML-formatted extension value. | ||||
| 	Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` | ||||
| 	Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` | ||||
| 	// Name of the extension | ||||
| 	ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` | ||||
| 	// Must be a valid yaml for the proto | ||||
| 	Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` | ||||
| } | ||||
|  | ||||
| func (x *Wrapper) Reset() { | ||||
| 	*x = Wrapper{} | ||||
| 	if protoimpl.UnsafeEnabled { | ||||
| 		mi := &file_extensions_extension_proto_msgTypes[3] | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		ms.StoreMessageInfo(mi) | ||||
| 	} | ||||
| } | ||||
| func (m *Wrapper) Reset()                    { *m = Wrapper{} } | ||||
| func (m *Wrapper) String() string            { return proto.CompactTextString(m) } | ||||
| func (*Wrapper) ProtoMessage()               {} | ||||
| func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } | ||||
|  | ||||
| func (x *Wrapper) String() string { | ||||
| 	return protoimpl.X.MessageStringOf(x) | ||||
| } | ||||
|  | ||||
| func (*Wrapper) ProtoMessage() {} | ||||
|  | ||||
| func (x *Wrapper) ProtoReflect() protoreflect.Message { | ||||
| 	mi := &file_extensions_extension_proto_msgTypes[3] | ||||
| 	if protoimpl.UnsafeEnabled && x != nil { | ||||
| 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | ||||
| 		if ms.LoadMessageInfo() == nil { | ||||
| 			ms.StoreMessageInfo(mi) | ||||
| 		} | ||||
| 		return ms | ||||
| 	} | ||||
| 	return mi.MessageOf(x) | ||||
| } | ||||
|  | ||||
| // Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. | ||||
| func (*Wrapper) Descriptor() ([]byte, []int) { | ||||
| 	return file_extensions_extension_proto_rawDescGZIP(), []int{3} | ||||
| } | ||||
|  | ||||
| func (x *Wrapper) GetVersion() string { | ||||
| 	if x != nil { | ||||
| 		return x.Version | ||||
| func (m *Wrapper) GetVersion() string { | ||||
| 	if m != nil { | ||||
| 		return m.Version | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (x *Wrapper) GetExtensionName() string { | ||||
| 	if x != nil { | ||||
| 		return x.ExtensionName | ||||
| func (m *Wrapper) GetExtensionName() string { | ||||
| 	if m != nil { | ||||
| 		return m.ExtensionName | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (x *Wrapper) GetYaml() string { | ||||
| 	if x != nil { | ||||
| 		return x.Yaml | ||||
| func (m *Wrapper) GetYaml() string { | ||||
| 	if m != nil { | ||||
| 		return m.Yaml | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| var File_extensions_extension_proto protoreflect.FileDescriptor | ||||
|  | ||||
| var file_extensions_extension_proto_rawDesc = []byte{ | ||||
| 	0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, | ||||
| 	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, | ||||
| 	0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, | ||||
| 	0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, | ||||
| 	0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, | ||||
| 	0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, | ||||
| 	0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, | ||||
| 	0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, | ||||
| 	0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, | ||||
| 	0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, | ||||
| 	0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, | ||||
| 	0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, | ||||
| 	0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, | ||||
| 	0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, | ||||
| 	0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, | ||||
| 	0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, | ||||
| 	0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, | ||||
| 	0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, | ||||
| 	0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, | ||||
| 	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, | ||||
| 	0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, | ||||
| 	0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, | ||||
| 	0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, | ||||
| 	0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, | ||||
| 	0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, | ||||
| 	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, | ||||
| 	0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, | ||||
| 	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, | ||||
| 	0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, | ||||
| 	0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, | ||||
| 	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, | ||||
| 	0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, | ||||
| 	0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, | ||||
| 	0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, | ||||
| 	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4b, 0x0a, 0x0e, 0x6f, | ||||
| 	0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, | ||||
| 	0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, | ||||
| 	0x01, 0x5a, 0x1f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e, | ||||
| 	0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, | ||||
| 	0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, | ||||
| func init() { | ||||
| 	proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") | ||||
| 	proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") | ||||
| 	proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") | ||||
| 	proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	file_extensions_extension_proto_rawDescOnce sync.Once | ||||
| 	file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc | ||||
| ) | ||||
| func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } | ||||
|  | ||||
| func file_extensions_extension_proto_rawDescGZIP() []byte { | ||||
| 	file_extensions_extension_proto_rawDescOnce.Do(func() { | ||||
| 		file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) | ||||
| 	}) | ||||
| 	return file_extensions_extension_proto_rawDescData | ||||
| } | ||||
|  | ||||
| var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) | ||||
| var file_extensions_extension_proto_goTypes = []interface{}{ | ||||
| 	(*Version)(nil),                  // 0: gnostic.extension.v1.Version | ||||
| 	(*ExtensionHandlerRequest)(nil),  // 1: gnostic.extension.v1.ExtensionHandlerRequest | ||||
| 	(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse | ||||
| 	(*Wrapper)(nil),                  // 3: gnostic.extension.v1.Wrapper | ||||
| 	(*any.Any)(nil),                  // 4: google.protobuf.Any | ||||
| } | ||||
| var file_extensions_extension_proto_depIdxs = []int32{ | ||||
| 	3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper | ||||
| 	0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version | ||||
| 	4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any | ||||
| 	3, // [3:3] is the sub-list for method output_type | ||||
| 	3, // [3:3] is the sub-list for method input_type | ||||
| 	3, // [3:3] is the sub-list for extension type_name | ||||
| 	3, // [3:3] is the sub-list for extension extendee | ||||
| 	0, // [0:3] is the sub-list for field type_name | ||||
| } | ||||
|  | ||||
| func init() { file_extensions_extension_proto_init() } | ||||
| func file_extensions_extension_proto_init() { | ||||
| 	if File_extensions_extension_proto != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	if !protoimpl.UnsafeEnabled { | ||||
| 		file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { | ||||
| 			switch v := v.(*Version); i { | ||||
| 			case 0: | ||||
| 				return &v.state | ||||
| 			case 1: | ||||
| 				return &v.sizeCache | ||||
| 			case 2: | ||||
| 				return &v.unknownFields | ||||
| 			default: | ||||
| 				return nil | ||||
| 			} | ||||
| 		} | ||||
| 		file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { | ||||
| 			switch v := v.(*ExtensionHandlerRequest); i { | ||||
| 			case 0: | ||||
| 				return &v.state | ||||
| 			case 1: | ||||
| 				return &v.sizeCache | ||||
| 			case 2: | ||||
| 				return &v.unknownFields | ||||
| 			default: | ||||
| 				return nil | ||||
| 			} | ||||
| 		} | ||||
| 		file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { | ||||
| 			switch v := v.(*ExtensionHandlerResponse); i { | ||||
| 			case 0: | ||||
| 				return &v.state | ||||
| 			case 1: | ||||
| 				return &v.sizeCache | ||||
| 			case 2: | ||||
| 				return &v.unknownFields | ||||
| 			default: | ||||
| 				return nil | ||||
| 			} | ||||
| 		} | ||||
| 		file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { | ||||
| 			switch v := v.(*Wrapper); i { | ||||
| 			case 0: | ||||
| 				return &v.state | ||||
| 			case 1: | ||||
| 				return &v.sizeCache | ||||
| 			case 2: | ||||
| 				return &v.unknownFields | ||||
| 			default: | ||||
| 				return nil | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	type x struct{} | ||||
| 	out := protoimpl.TypeBuilder{ | ||||
| 		File: protoimpl.DescBuilder{ | ||||
| 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(), | ||||
| 			RawDescriptor: file_extensions_extension_proto_rawDesc, | ||||
| 			NumEnums:      0, | ||||
| 			NumMessages:   4, | ||||
| 			NumExtensions: 0, | ||||
| 			NumServices:   0, | ||||
| 		}, | ||||
| 		GoTypes:           file_extensions_extension_proto_goTypes, | ||||
| 		DependencyIndexes: file_extensions_extension_proto_depIdxs, | ||||
| 		MessageInfos:      file_extensions_extension_proto_msgTypes, | ||||
| 	}.Build() | ||||
| 	File_extensions_extension_proto = out.File | ||||
| 	file_extensions_extension_proto_rawDesc = nil | ||||
| 	file_extensions_extension_proto_goTypes = nil | ||||
| 	file_extensions_extension_proto_depIdxs = nil | ||||
| var fileDescriptor0 = []byte{ | ||||
| 	// 355 bytes of a gzipped FileDescriptorProto | ||||
| 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, | ||||
| 	0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, | ||||
| 	0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, | ||||
| 	0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, | ||||
| 	0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, | ||||
| 	0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, | ||||
| 	0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, | ||||
| 	0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, | ||||
| 	0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, | ||||
| 	0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, | ||||
| 	0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, | ||||
| 	0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, | ||||
| 	0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, | ||||
| 	0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, | ||||
| 	0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, | ||||
| 	0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, | ||||
| 	0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, | ||||
| 	0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, | ||||
| 	0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, | ||||
| 	0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, | ||||
| 	0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, | ||||
| 	0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, | ||||
| 	0x02, 0x00, 0x00, | ||||
| } | ||||
|  | ||||
							
								
								
									
31  vendor/github.com/googleapis/gnostic/extensions/extension.proto (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| @ -14,9 +14,8 @@ | ||||
|  | ||||
| syntax = "proto3"; | ||||
|  | ||||
| package gnostic.extension.v1; | ||||
|  | ||||
| import "google/protobuf/any.proto"; | ||||
| package openapiextension.v1; | ||||
|  | ||||
| // This option lets the proto compiler generate Java code inside the package | ||||
| // name (see below) instead of inside an outer class. It creates a simpler | ||||
| @ -27,10 +26,10 @@ option java_multiple_files = true; | ||||
| // The Java outer classname should be the filename in UpperCamelCase. This | ||||
| // class is only used to hold proto descriptor, so developers don't need to | ||||
| // work with it directly. | ||||
| option java_outer_classname = "GnosticExtension"; | ||||
| option java_outer_classname = "OpenAPIExtensionV1"; | ||||
|  | ||||
| // The Java package name must be proto package name with proper prefix. | ||||
| option java_package = "org.gnostic.v1"; | ||||
| option java_package = "org.openapic.v1"; | ||||
|  | ||||
| // A reasonable prefix for the Objective-C symbols generated from the package. | ||||
| // It should at a minimum be 3 characters long, all uppercase, and convention | ||||
| @ -38,12 +37,9 @@ option java_package = "org.gnostic.v1"; | ||||
| // hopefully unique enough to not conflict with things that may come along in | ||||
| // the future. 'GPB' is reserved for the protocol buffer implementation itself. | ||||
| // | ||||
| option objc_class_prefix = "GNX"; // "Gnostic Extension" | ||||
| option objc_class_prefix = "OAE"; // "OpenAPI Extension" | ||||
|  | ||||
| // The Go package name. | ||||
| option go_package = "extensions;gnostic_extension_v1"; | ||||
|  | ||||
| // The version number of Gnostic. | ||||
| // The version number of OpenAPI compiler. | ||||
| message Version { | ||||
|   int32 major = 1; | ||||
|   int32 minor = 2; | ||||
| @ -56,11 +52,12 @@ message Version { | ||||
| // An encoded Request is written to the ExtensionHandler's stdin. | ||||
| message ExtensionHandlerRequest { | ||||
|  | ||||
|   // The extension to process. | ||||
|   // The OpenAPI descriptions that were explicitly listed on the command line. | ||||
|   // The specifications will appear in the order they are specified to openapic. | ||||
|   Wrapper wrapper = 1; | ||||
|  | ||||
|   // The version number of Gnostic. | ||||
|   Version compiler_version = 2; | ||||
|   // The version number of openapi compiler. | ||||
|   Version compiler_version = 3; | ||||
| } | ||||
|  | ||||
| // The extension writes an encoded ExtensionHandlerResponse to stdout. | ||||
| @ -69,7 +66,7 @@ message ExtensionHandlerResponse { | ||||
|   // true if the extension is handled by the extension handler; false otherwise | ||||
|   bool handled = 1; | ||||
|  | ||||
|   // Error message(s).  If non-empty, the extension handling failed. | ||||
|   // Error message.  If non-empty, the extension handling failed. | ||||
|   // The extension handler process should exit with status code zero | ||||
|   // even if it reports an error in this way. | ||||
|   // | ||||
| @ -78,7 +75,7 @@ message ExtensionHandlerResponse { | ||||
|   // itself -- such as the input Document being unparseable -- should be | ||||
|   // reported by writing a message to stderr and exiting with a non-zero | ||||
|   // status code. | ||||
|   repeated string errors = 2; | ||||
|   repeated string error = 2; | ||||
|  | ||||
|   // text output | ||||
|   google.protobuf.Any value = 3; | ||||
| @ -88,9 +85,9 @@ message Wrapper { | ||||
|   // version of the OpenAPI specification in which this extension was written. | ||||
|   string version = 1; | ||||
|  | ||||
|   // Name of the extension. | ||||
|   // Name of the extension | ||||
|   string extension_name = 2; | ||||
|  | ||||
|   // YAML-formatted extension value. | ||||
|   // Must be a valid yaml for the proto | ||||
|   string yaml = 3; | ||||
| } | ||||
|  | ||||
							
								
								
									
68  vendor/github.com/googleapis/gnostic/extensions/extensions.go (generated, vendored)
							| @ -1,4 +1,4 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // Copyright 2017 Google Inc. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| @ -12,53 +12,71 @@ | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package gnostic_extension_v1 | ||||
| package openapiextension_v1 | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
|  | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/golang/protobuf/ptypes" | ||||
| ) | ||||
|  | ||||
| type documentHandler func(version string, extensionName string, document string) | ||||
| type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) | ||||
|  | ||||
| // Main implements the main program of an extension handler. | ||||
| func Main(handler extensionHandler) { | ||||
| 	// unpack the request | ||||
| func forInputYamlFromOpenapic(handler documentHandler) { | ||||
| 	data, err := ioutil.ReadAll(os.Stdin) | ||||
| 	if err != nil { | ||||
| 		log.Println("File error:", err.Error()) | ||||
| 		fmt.Println("File error:", err.Error()) | ||||
| 		os.Exit(1) | ||||
| 	} | ||||
| 	if len(data) == 0 { | ||||
| 		log.Println("No input data.") | ||||
| 		fmt.Println("No input data.") | ||||
| 		os.Exit(1) | ||||
| 	} | ||||
| 	request := &ExtensionHandlerRequest{} | ||||
| 	err = proto.Unmarshal(data, request) | ||||
| 	if err != nil { | ||||
| 		log.Println("Input error:", err.Error()) | ||||
| 		fmt.Println("Input error:", err.Error()) | ||||
| 		os.Exit(1) | ||||
| 	} | ||||
| 	// call the handler | ||||
| 	handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) | ||||
| 	// respond with the output of the handler | ||||
| 	response := &ExtensionHandlerResponse{ | ||||
| 		Handled: false, // default assumption | ||||
| 		Errors:  make([]string, 0), | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		response.Errors = append(response.Errors, err.Error()) | ||||
| 	} else if handled { | ||||
| 		response.Handled = true | ||||
| 		response.Value, err = ptypes.MarshalAny(output) | ||||
| 		if err != nil { | ||||
| 			response.Errors = append(response.Errors, err.Error()) | ||||
| 		} | ||||
| 	} | ||||
| 	handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) | ||||
| } | ||||
|  | ||||
| // ProcessExtension calls the handler for a specified extension. | ||||
| func ProcessExtension(handleExtension extensionHandler) { | ||||
| 	response := &ExtensionHandlerResponse{} | ||||
| 	forInputYamlFromOpenapic( | ||||
| 		func(version string, extensionName string, yamlInput string) { | ||||
| 			var newObject proto.Message | ||||
| 			var err error | ||||
|  | ||||
| 			handled, newObject, err := handleExtension(extensionName, yamlInput) | ||||
| 			if !handled { | ||||
| 				responseBytes, _ := proto.Marshal(response) | ||||
| 				os.Stdout.Write(responseBytes) | ||||
| 				os.Exit(0) | ||||
| 			} | ||||
|  | ||||
| 			// If we reach here, then the extension is handled | ||||
| 			response.Handled = true | ||||
| 			if err != nil { | ||||
| 				response.Error = append(response.Error, err.Error()) | ||||
| 				responseBytes, _ := proto.Marshal(response) | ||||
| 				os.Stdout.Write(responseBytes) | ||||
| 				os.Exit(0) | ||||
| 			} | ||||
| 			response.Value, err = ptypes.MarshalAny(newObject) | ||||
| 			if err != nil { | ||||
| 				response.Error = append(response.Error, err.Error()) | ||||
| 				responseBytes, _ := proto.Marshal(response) | ||||
| 				os.Stdout.Write(responseBytes) | ||||
| 				os.Exit(0) | ||||
| 			} | ||||
| 		}) | ||||
|  | ||||
| 	responseBytes, _ := proto.Marshal(response) | ||||
| 	os.Stdout.Write(responseBytes) | ||||
| } | ||||
|  | ||||
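For context, here is a minimal sketch of how an extension handler binary would use the ProcessExtension entry point restored by this revert. The "x-sample" extension name and the wrappers.StringValue payload are placeholders for illustration only, not part of this repository.

package main

import (
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"

	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	// ProcessExtension reads an encoded ExtensionHandlerRequest from stdin,
	// invokes the callback, and writes an ExtensionHandlerResponse to stdout.
	openapiextension_v1.ProcessExtension(func(name string, yamlInput string) (bool, proto.Message, error) {
		if name != "x-sample" { // placeholder extension name
			return false, nil, nil // not handled; the compiler keeps the raw YAML
		}
		// A real handler would parse yamlInput into its own proto message;
		// wrappers.StringValue is used here only as a stand-in payload.
		return true, &wrappers.StringValue{Value: yamlInput}, nil
	})
}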
							
								
								
									
4  vendor/github.com/googleapis/gnostic/jsonschema/README.md (generated, vendored)
							| @ -1,4 +0,0 @@ | ||||
| # jsonschema | ||||
|  | ||||
| This directory contains code for reading, writing, and manipulating JSON | ||||
| schemas. | ||||
							
								
								
									
84  vendor/github.com/googleapis/gnostic/jsonschema/base.go (generated, vendored)
							| @ -1,84 +0,0 @@ | ||||
|  | ||||
| // THIS FILE IS AUTOMATICALLY GENERATED. | ||||
|  | ||||
| package jsonschema | ||||
|  | ||||
| import ( | ||||
| 	"encoding/base64" | ||||
| ) | ||||
|  | ||||
| func baseSchemaBytes() ([]byte, error){ | ||||
| 	return base64.StdEncoding.DecodeString( | ||||
| `ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi | ||||
| JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl | ||||
| c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK | ||||
| ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg | ||||
| ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg | ||||
| ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp | ||||
| bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp | ||||
| dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl | ||||
| ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK | ||||
| ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v | ||||
| bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg | ||||
| ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki | ||||
| LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p | ||||
| bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s | ||||
| CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog | ||||
| ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg | ||||
| ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog | ||||
| ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg | ||||
| ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog | ||||
| ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 | ||||
| IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog | ||||
| ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 | ||||
| ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl | ||||
| ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw | ||||
| ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg | ||||
| ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg | ||||
| ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg | ||||
| ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg | ||||
| IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 | ||||
| aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg | ||||
| ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg | ||||
| ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg | ||||
| ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK | ||||
| ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi | ||||
| ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP | ||||
| ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy | ||||
| ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg | ||||
| ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv | ||||
| ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi | ||||
| OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl | ||||
| SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs | ||||
| dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k | ||||
| ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk | ||||
| cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl | ||||
| cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh | ||||
| ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg | ||||
| ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg | ||||
| ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk | ||||
| ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk | ||||
| ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 | ||||
| IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi | ||||
| b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 | ||||
| LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl | ||||
| cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv | ||||
| bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog | ||||
| ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq | ||||
| ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg | ||||
| ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg | ||||
| ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg | ||||
| ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg | ||||
| ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu | ||||
| aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh | ||||
| bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl | ||||
| cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs | ||||
| CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs | ||||
| ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg | ||||
| ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg | ||||
| ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy | ||||
| YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 | ||||
| IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg | ||||
| fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 | ||||
| IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 | ||||
| c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} | ||||
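As a sanity check on the base64 blob above, a hypothetical in-package test (not part of this commit) could decode it and confirm that it is the JSON Schema draft-04 meta-schema:

package jsonschema

import (
	"encoding/json"
	"testing"
)

// Hypothetical test: baseSchemaBytes is unexported, so this compiles only
// inside the jsonschema package.
func TestBaseSchemaBytes(t *testing.T) {
	data, err := baseSchemaBytes()
	if err != nil {
		t.Fatal(err)
	}
	var meta map[string]interface{}
	if err := json.Unmarshal(data, &meta); err != nil {
		t.Fatal(err)
	}
	// The decoded document identifies itself as the draft-04 meta-schema.
	if meta["id"] != "http://json-schema.org/draft-04/schema#" {
		t.Fatalf("unexpected id: %v", meta["id"])
	}
}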
							
								
								
									
229  vendor/github.com/googleapis/gnostic/jsonschema/display.go (generated, vendored)
							| @ -1,229 +0,0 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jsonschema | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // | ||||
| // DISPLAY | ||||
| // The following methods display Schemas. | ||||
| // | ||||
|  | ||||
| // Description returns a string representation of a string or string array. | ||||
| func (s *StringOrStringArray) Description() string { | ||||
| 	if s.String != nil { | ||||
| 		return *s.String | ||||
| 	} | ||||
| 	if s.StringArray != nil { | ||||
| 		return strings.Join(*s.StringArray, ", ") | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // Returns a string representation of a Schema. | ||||
| func (schema *Schema) String() string { | ||||
| 	return schema.describeSchema("") | ||||
| } | ||||
|  | ||||
| // Helper: Returns a string representation of a Schema indented by a specified string. | ||||
| func (schema *Schema) describeSchema(indent string) string { | ||||
| 	result := "" | ||||
| 	if schema.Schema != nil { | ||||
| 		result += indent + "$schema: " + *(schema.Schema) + "\n" | ||||
| 	} | ||||
| 	if schema.ID != nil { | ||||
| 		result += indent + "id: " + *(schema.ID) + "\n" | ||||
| 	} | ||||
| 	if schema.MultipleOf != nil { | ||||
| 		result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) | ||||
| 	} | ||||
| 	if schema.Maximum != nil { | ||||
| 		result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) | ||||
| 	} | ||||
| 	if schema.ExclusiveMaximum != nil { | ||||
| 		result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) | ||||
| 	} | ||||
| 	if schema.Minimum != nil { | ||||
| 		result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) | ||||
| 	} | ||||
| 	if schema.ExclusiveMinimum != nil { | ||||
| 		result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) | ||||
| 	} | ||||
| 	if schema.MaxLength != nil { | ||||
| 		result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) | ||||
| 	} | ||||
| 	if schema.MinLength != nil { | ||||
| 		result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) | ||||
| 	} | ||||
| 	if schema.Pattern != nil { | ||||
| 		result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) | ||||
| 	} | ||||
| 	if schema.AdditionalItems != nil { | ||||
| 		s := schema.AdditionalItems.Schema | ||||
| 		if s != nil { | ||||
| 			result += indent + "additionalItems:\n" | ||||
| 			result += s.describeSchema(indent + "  ") | ||||
| 		} else { | ||||
| 			b := *(schema.AdditionalItems.Boolean) | ||||
| 			result += indent + fmt.Sprintf("additionalItems: %+v\n", b) | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Items != nil { | ||||
| 		result += indent + "items:\n" | ||||
| 		items := schema.Items | ||||
| 		if items.SchemaArray != nil { | ||||
| 			for i, s := range *(items.SchemaArray) { | ||||
| 				result += indent + "  " + fmt.Sprintf("%d", i) + ":\n" | ||||
| 				result += s.describeSchema(indent + "  " + "  ") | ||||
| 			} | ||||
| 		} else if items.Schema != nil { | ||||
| 			result += items.Schema.describeSchema(indent + "  " + "  ") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.MaxItems != nil { | ||||
| 		result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) | ||||
| 	} | ||||
| 	if schema.MinItems != nil { | ||||
| 		result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) | ||||
| 	} | ||||
| 	if schema.UniqueItems != nil { | ||||
| 		result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) | ||||
| 	} | ||||
| 	if schema.MaxProperties != nil { | ||||
| 		result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) | ||||
| 	} | ||||
| 	if schema.MinProperties != nil { | ||||
| 		result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) | ||||
| 	} | ||||
| 	if schema.Required != nil { | ||||
| 		result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) | ||||
| 	} | ||||
| 	if schema.AdditionalProperties != nil { | ||||
| 		s := schema.AdditionalProperties.Schema | ||||
| 		if s != nil { | ||||
| 			result += indent + "additionalProperties:\n" | ||||
| 			result += s.describeSchema(indent + "  ") | ||||
| 		} else { | ||||
| 			b := *(schema.AdditionalProperties.Boolean) | ||||
| 			result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Properties != nil { | ||||
| 		result += indent + "properties:\n" | ||||
| 		for _, pair := range *(schema.Properties) { | ||||
| 			name := pair.Name | ||||
| 			s := pair.Value | ||||
| 			result += indent + "  " + name + ":\n" | ||||
| 			result += s.describeSchema(indent + "  " + "  ") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.PatternProperties != nil { | ||||
| 		result += indent + "patternProperties:\n" | ||||
| 		for _, pair := range *(schema.PatternProperties) { | ||||
| 			name := pair.Name | ||||
| 			s := pair.Value | ||||
| 			result += indent + "  " + name + ":\n" | ||||
| 			result += s.describeSchema(indent + "  " + "  ") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Dependencies != nil { | ||||
| 		result += indent + "dependencies:\n" | ||||
| 		for _, pair := range *(schema.Dependencies) { | ||||
| 			name := pair.Name | ||||
| 			schemaOrStringArray := pair.Value | ||||
| 			s := schemaOrStringArray.Schema | ||||
| 			if s != nil { | ||||
| 				result += indent + "  " + name + ":\n" | ||||
| 				result += s.describeSchema(indent + "  " + "  ") | ||||
| 			} else { | ||||
| 				a := schemaOrStringArray.StringArray | ||||
| 				if a != nil { | ||||
| 					result += indent + "  " + name + ":\n" | ||||
| 					for _, s2 := range *a { | ||||
| 						result += indent + "  " + "  " + s2 + "\n" | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
|  | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Enumeration != nil { | ||||
| 		result += indent + "enumeration:\n" | ||||
| 		for _, value := range *(schema.Enumeration) { | ||||
| 			if value.String != nil { | ||||
| 				result += indent + "  " + fmt.Sprintf("%+v\n", *value.String) | ||||
| 			} else { | ||||
| 				result += indent + "  " + fmt.Sprintf("%+v\n", *value.Bool) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Type != nil { | ||||
| 		result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) | ||||
| 	} | ||||
| 	if schema.AllOf != nil { | ||||
| 		result += indent + "allOf:\n" | ||||
| 		for _, s := range *(schema.AllOf) { | ||||
| 			result += s.describeSchema(indent + "  ") | ||||
| 			result += indent + "-\n" | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.AnyOf != nil { | ||||
| 		result += indent + "anyOf:\n" | ||||
| 		for _, s := range *(schema.AnyOf) { | ||||
| 			result += s.describeSchema(indent + "  ") | ||||
| 			result += indent + "-\n" | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.OneOf != nil { | ||||
| 		result += indent + "oneOf:\n" | ||||
| 		for _, s := range *(schema.OneOf) { | ||||
| 			result += s.describeSchema(indent + "  ") | ||||
| 			result += indent + "-\n" | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Not != nil { | ||||
| 		result += indent + "not:\n" | ||||
| 		result += schema.Not.describeSchema(indent + "  ") | ||||
| 	} | ||||
| 	if schema.Definitions != nil { | ||||
| 		result += indent + "definitions:\n" | ||||
| 		for _, pair := range *(schema.Definitions) { | ||||
| 			name := pair.Name | ||||
| 			s := pair.Value | ||||
| 			result += indent + "  " + name + ":\n" | ||||
| 			result += s.describeSchema(indent + "  " + "  ") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Title != nil { | ||||
| 		result += indent + "title: " + *(schema.Title) + "\n" | ||||
| 	} | ||||
| 	if schema.Description != nil { | ||||
| 		result += indent + "description: " + *(schema.Description) + "\n" | ||||
| 	} | ||||
| 	if schema.Default != nil { | ||||
| 		result += indent + "default:\n" | ||||
| 		result += indent + fmt.Sprintf("  %+v\n", *(schema.Default)) | ||||
| 	} | ||||
| 	if schema.Format != nil { | ||||
| 		result += indent + "format: " + *(schema.Format) + "\n" | ||||
| 	} | ||||
| 	if schema.Ref != nil { | ||||
| 		result += indent + "$ref: " + *(schema.Ref) + "\n" | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
							
								
								
									
228  vendor/github.com/googleapis/gnostic/jsonschema/models.go (generated, vendored)
							| @ -1,228 +0,0 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // Package jsonschema supports the reading, writing, and manipulation | ||||
| // of JSON Schemas. | ||||
| package jsonschema | ||||
|  | ||||
| import "gopkg.in/yaml.v3" | ||||
|  | ||||
| // The Schema struct models a JSON Schema and, because schemas are | ||||
| // defined hierarchically, contains many references to itself. | ||||
| // All fields are pointers and are nil if the associated values | ||||
| // are not specified. | ||||
| type Schema struct { | ||||
| 	Schema *string // $schema | ||||
| 	ID     *string // id keyword used for $ref resolution scope | ||||
| 	Ref    *string // $ref, i.e. JSON Pointers | ||||
|  | ||||
| 	// http://json-schema.org/latest/json-schema-validation.html | ||||
| 	// 5.1.  Validation keywords for numeric instances (number and integer) | ||||
| 	MultipleOf       *SchemaNumber | ||||
| 	Maximum          *SchemaNumber | ||||
| 	ExclusiveMaximum *bool | ||||
| 	Minimum          *SchemaNumber | ||||
| 	ExclusiveMinimum *bool | ||||
|  | ||||
| 	// 5.2.  Validation keywords for strings | ||||
| 	MaxLength *int64 | ||||
| 	MinLength *int64 | ||||
| 	Pattern   *string | ||||
|  | ||||
| 	// 5.3.  Validation keywords for arrays | ||||
| 	AdditionalItems *SchemaOrBoolean | ||||
| 	Items           *SchemaOrSchemaArray | ||||
| 	MaxItems        *int64 | ||||
| 	MinItems        *int64 | ||||
| 	UniqueItems     *bool | ||||
|  | ||||
| 	// 5.4.  Validation keywords for objects | ||||
| 	MaxProperties        *int64 | ||||
| 	MinProperties        *int64 | ||||
| 	Required             *[]string | ||||
| 	AdditionalProperties *SchemaOrBoolean | ||||
| 	Properties           *[]*NamedSchema | ||||
| 	PatternProperties    *[]*NamedSchema | ||||
| 	Dependencies         *[]*NamedSchemaOrStringArray | ||||
|  | ||||
| 	// 5.5.  Validation keywords for any instance type | ||||
| 	Enumeration *[]SchemaEnumValue | ||||
| 	Type        *StringOrStringArray | ||||
| 	AllOf       *[]*Schema | ||||
| 	AnyOf       *[]*Schema | ||||
| 	OneOf       *[]*Schema | ||||
| 	Not         *Schema | ||||
| 	Definitions *[]*NamedSchema | ||||
|  | ||||
| 	// 6.  Metadata keywords | ||||
| 	Title       *string | ||||
| 	Description *string | ||||
| 	Default     *yaml.Node | ||||
|  | ||||
| 	// 7.  Semantic validation with "format" | ||||
| 	Format *string | ||||
| } | ||||
|  | ||||
| // These helper structs represent "combination" types that generally can | ||||
| // have values of one type or another. All are used to represent parts | ||||
| // of Schemas. | ||||
|  | ||||
| // SchemaNumber represents a value that can be either an Integer or a Float. | ||||
| type SchemaNumber struct { | ||||
| 	Integer *int64 | ||||
| 	Float   *float64 | ||||
| } | ||||
|  | ||||
| // NewSchemaNumberWithInteger creates and returns a new object | ||||
| func NewSchemaNumberWithInteger(i int64) *SchemaNumber { | ||||
| 	result := &SchemaNumber{} | ||||
| 	result.Integer = &i | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // NewSchemaNumberWithFloat creates and returns a new object | ||||
| func NewSchemaNumberWithFloat(f float64) *SchemaNumber { | ||||
| 	result := &SchemaNumber{} | ||||
| 	result.Float = &f | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // SchemaOrBoolean represents a value that can be either a Schema or a Boolean. | ||||
| type SchemaOrBoolean struct { | ||||
| 	Schema  *Schema | ||||
| 	Boolean *bool | ||||
| } | ||||
|  | ||||
| // NewSchemaOrBooleanWithSchema creates and returns a new object | ||||
| func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { | ||||
| 	result := &SchemaOrBoolean{} | ||||
| 	result.Schema = s | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // NewSchemaOrBooleanWithBoolean creates and returns a new object | ||||
| func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { | ||||
| 	result := &SchemaOrBoolean{} | ||||
| 	result.Boolean = &b | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // StringOrStringArray represents a value that can be either | ||||
| // a String or an Array of Strings. | ||||
| type StringOrStringArray struct { | ||||
| 	String      *string | ||||
| 	StringArray *[]string | ||||
| } | ||||
|  | ||||
| // NewStringOrStringArrayWithString creates and returns a new object | ||||
| func NewStringOrStringArrayWithString(s string) *StringOrStringArray { | ||||
| 	result := &StringOrStringArray{} | ||||
| 	result.String = &s | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // NewStringOrStringArrayWithStringArray creates and returns a new object | ||||
| func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { | ||||
| 	result := &StringOrStringArray{} | ||||
| 	result.StringArray = &a | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // SchemaOrStringArray represents a value that can be either | ||||
| // a Schema or an Array of Strings. | ||||
| type SchemaOrStringArray struct { | ||||
| 	Schema      *Schema | ||||
| 	StringArray *[]string | ||||
| } | ||||
|  | ||||
| // SchemaOrSchemaArray represents a value that can be either | ||||
| // a Schema or an Array of Schemas. | ||||
| type SchemaOrSchemaArray struct { | ||||
| 	Schema      *Schema | ||||
| 	SchemaArray *[]*Schema | ||||
| } | ||||
|  | ||||
| // NewSchemaOrSchemaArrayWithSchema creates and returns a new object | ||||
| func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { | ||||
| 	result := &SchemaOrSchemaArray{} | ||||
| 	result.Schema = s | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object | ||||
| func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { | ||||
| 	result := &SchemaOrSchemaArray{} | ||||
| 	result.SchemaArray = &a | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // SchemaEnumValue represents a value that can be part of an | ||||
| // enumeration in a Schema. | ||||
| type SchemaEnumValue struct { | ||||
| 	String *string | ||||
| 	Bool   *bool | ||||
| } | ||||
|  | ||||
| // NamedSchema is a name-value pair that is used to emulate maps | ||||
| // with ordered keys. | ||||
| type NamedSchema struct { | ||||
| 	Name  string | ||||
| 	Value *Schema | ||||
| } | ||||
|  | ||||
| // NewNamedSchema creates and returns a new object | ||||
| func NewNamedSchema(name string, value *Schema) *NamedSchema { | ||||
| 	return &NamedSchema{Name: name, Value: value} | ||||
| } | ||||
|  | ||||
| // NamedSchemaOrStringArray is a name-value pair that is used | ||||
| // to emulate maps with ordered keys. | ||||
| type NamedSchemaOrStringArray struct { | ||||
| 	Name  string | ||||
| 	Value *SchemaOrStringArray | ||||
| } | ||||
|  | ||||
| // Access named subschemas by name | ||||
|  | ||||
| func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { | ||||
| 	if array == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	for _, pair := range *array { | ||||
| 		if pair.Name == name { | ||||
| 			return pair.Value | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // PropertyWithName returns the selected element. | ||||
| func (s *Schema) PropertyWithName(name string) *Schema { | ||||
| 	return namedSchemaArrayElementWithName(s.Properties, name) | ||||
| } | ||||
|  | ||||
| // PatternPropertyWithName returns the selected element. | ||||
| func (s *Schema) PatternPropertyWithName(name string) *Schema { | ||||
| 	return namedSchemaArrayElementWithName(s.PatternProperties, name) | ||||
| } | ||||
|  | ||||
| // DefinitionWithName returns the selected element. | ||||
| func (s *Schema) DefinitionWithName(name string) *Schema { | ||||
| 	return namedSchemaArrayElementWithName(s.Definitions, name) | ||||
| } | ||||
|  | ||||
| // AddProperty adds a named property. | ||||
| func (s *Schema) AddProperty(name string, property *Schema) { | ||||
| 	*s.Properties = append(*s.Properties, NewNamedSchema(name, property)) | ||||
| } | ||||
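For reference, a minimal sketch of how the helpers removed here are typically used, assuming the upstream github.com/googleapis/gnostic/jsonschema package is available; the field and constructor names are taken from the structs above.

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	title := "Example"
	root := &jsonschema.Schema{Title: &title}

	// AddProperty appends to the existing slice, so Properties must be non-nil first.
	props := []*jsonschema.NamedSchema{}
	root.Properties = &props
	root.AddProperty("name", &jsonschema.Schema{
		Type: jsonschema.NewStringOrStringArrayWithString("string"),
	})

	// Look the property back up, then print the whole schema via String() (display.go).
	if s := root.PropertyWithName("name"); s != nil {
		fmt.Println(s.Type.Description()) // "string"
	}
	fmt.Println(root)
}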
							
								
								
									
394  vendor/github.com/googleapis/gnostic/jsonschema/operations.go (generated, vendored)
							| @ -1,394 +0,0 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jsonschema | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"log" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // | ||||
| // OPERATIONS | ||||
| // The following methods perform operations on Schemas. | ||||
| // | ||||
|  | ||||
| // IsEmpty returns true if no members of the Schema are specified. | ||||
| func (schema *Schema) IsEmpty() bool { | ||||
| 	return (schema.Schema == nil) && | ||||
| 		(schema.ID == nil) && | ||||
| 		(schema.MultipleOf == nil) && | ||||
| 		(schema.Maximum == nil) && | ||||
| 		(schema.ExclusiveMaximum == nil) && | ||||
| 		(schema.Minimum == nil) && | ||||
| 		(schema.ExclusiveMinimum == nil) && | ||||
| 		(schema.MaxLength == nil) && | ||||
| 		(schema.MinLength == nil) && | ||||
| 		(schema.Pattern == nil) && | ||||
| 		(schema.AdditionalItems == nil) && | ||||
| 		(schema.Items == nil) && | ||||
| 		(schema.MaxItems == nil) && | ||||
| 		(schema.MinItems == nil) && | ||||
| 		(schema.UniqueItems == nil) && | ||||
| 		(schema.MaxProperties == nil) && | ||||
| 		(schema.MinProperties == nil) && | ||||
| 		(schema.Required == nil) && | ||||
| 		(schema.AdditionalProperties == nil) && | ||||
| 		(schema.Properties == nil) && | ||||
| 		(schema.PatternProperties == nil) && | ||||
| 		(schema.Dependencies == nil) && | ||||
| 		(schema.Enumeration == nil) && | ||||
| 		(schema.Type == nil) && | ||||
| 		(schema.AllOf == nil) && | ||||
| 		(schema.AnyOf == nil) && | ||||
| 		(schema.OneOf == nil) && | ||||
| 		(schema.Not == nil) && | ||||
| 		(schema.Definitions == nil) && | ||||
| 		(schema.Title == nil) && | ||||
| 		(schema.Description == nil) && | ||||
| 		(schema.Default == nil) && | ||||
| 		(schema.Format == nil) && | ||||
| 		(schema.Ref == nil) | ||||
| } | ||||
|  | ||||
| // IsEqual returns true if two schemas are equal. | ||||
| func (schema *Schema) IsEqual(schema2 *Schema) bool { | ||||
| 	return schema.String() == schema2.String() | ||||
| } | ||||
|  | ||||
| // SchemaOperation represents a function that can be applied to a Schema. | ||||
| type SchemaOperation func(schema *Schema, context string) | ||||
|  | ||||
| // applyToSchemas applies the specified function to this Schema and to every Schema it contains. | ||||
| func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { | ||||
|  | ||||
| 	if schema.AdditionalItems != nil { | ||||
| 		s := schema.AdditionalItems.Schema | ||||
| 		if s != nil { | ||||
| 			s.applyToSchemas(operation, "AdditionalItems") | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if schema.Items != nil { | ||||
| 		if schema.Items.SchemaArray != nil { | ||||
| 			for _, s := range *(schema.Items.SchemaArray) { | ||||
| 				s.applyToSchemas(operation, "Items.SchemaArray") | ||||
| 			} | ||||
| 		} else if schema.Items.Schema != nil { | ||||
| 			schema.Items.Schema.applyToSchemas(operation, "Items.Schema") | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if schema.AdditionalProperties != nil { | ||||
| 		s := schema.AdditionalProperties.Schema | ||||
| 		if s != nil { | ||||
| 			s.applyToSchemas(operation, "AdditionalProperties") | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if schema.Properties != nil { | ||||
| 		for _, pair := range *(schema.Properties) { | ||||
| 			s := pair.Value | ||||
| 			s.applyToSchemas(operation, "Properties") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.PatternProperties != nil { | ||||
| 		for _, pair := range *(schema.PatternProperties) { | ||||
| 			s := pair.Value | ||||
| 			s.applyToSchemas(operation, "PatternProperties") | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if schema.Dependencies != nil { | ||||
| 		for _, pair := range *(schema.Dependencies) { | ||||
| 			schemaOrStringArray := pair.Value | ||||
| 			s := schemaOrStringArray.Schema | ||||
| 			if s != nil { | ||||
| 				s.applyToSchemas(operation, "Dependencies") | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if schema.AllOf != nil { | ||||
| 		for _, s := range *(schema.AllOf) { | ||||
| 			s.applyToSchemas(operation, "AllOf") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.AnyOf != nil { | ||||
| 		for _, s := range *(schema.AnyOf) { | ||||
| 			s.applyToSchemas(operation, "AnyOf") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.OneOf != nil { | ||||
| 		for _, s := range *(schema.OneOf) { | ||||
| 			s.applyToSchemas(operation, "OneOf") | ||||
| 		} | ||||
| 	} | ||||
| 	if schema.Not != nil { | ||||
| 		schema.Not.applyToSchemas(operation, "Not") | ||||
| 	} | ||||
|  | ||||
| 	if schema.Definitions != nil { | ||||
| 		for _, pair := range *(schema.Definitions) { | ||||
| 			s := pair.Value | ||||
| 			s.applyToSchemas(operation, "Definitions") | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	operation(schema, context) | ||||
| } | ||||
|  | ||||
| // CopyProperties copies all non-nil properties from the source Schema into this Schema. | ||||
| func (schema *Schema) CopyProperties(source *Schema) { | ||||
| 	if source.Schema != nil { | ||||
| 		schema.Schema = source.Schema | ||||
| 	} | ||||
| 	if source.ID != nil { | ||||
| 		schema.ID = source.ID | ||||
| 	} | ||||
| 	if source.MultipleOf != nil { | ||||
| 		schema.MultipleOf = source.MultipleOf | ||||
| 	} | ||||
| 	if source.Maximum != nil { | ||||
| 		schema.Maximum = source.Maximum | ||||
| 	} | ||||
| 	if source.ExclusiveMaximum != nil { | ||||
| 		schema.ExclusiveMaximum = source.ExclusiveMaximum | ||||
| 	} | ||||
| 	if source.Minimum != nil { | ||||
| 		schema.Minimum = source.Minimum | ||||
| 	} | ||||
| 	if source.ExclusiveMinimum != nil { | ||||
| 		schema.ExclusiveMinimum = source.ExclusiveMinimum | ||||
| 	} | ||||
| 	if source.MaxLength != nil { | ||||
| 		schema.MaxLength = source.MaxLength | ||||
| 	} | ||||
| 	if source.MinLength != nil { | ||||
| 		schema.MinLength = source.MinLength | ||||
| 	} | ||||
| 	if source.Pattern != nil { | ||||
| 		schema.Pattern = source.Pattern | ||||
| 	} | ||||
| 	if source.AdditionalItems != nil { | ||||
| 		schema.AdditionalItems = source.AdditionalItems | ||||
| 	} | ||||
| 	if source.Items != nil { | ||||
| 		schema.Items = source.Items | ||||
| 	} | ||||
| 	if source.MaxItems != nil { | ||||
| 		schema.MaxItems = source.MaxItems | ||||
| 	} | ||||
| 	if source.MinItems != nil { | ||||
| 		schema.MinItems = source.MinItems | ||||
| 	} | ||||
| 	if source.UniqueItems != nil { | ||||
| 		schema.UniqueItems = source.UniqueItems | ||||
| 	} | ||||
| 	if source.MaxProperties != nil { | ||||
| 		schema.MaxProperties = source.MaxProperties | ||||
| 	} | ||||
| 	if source.MinProperties != nil { | ||||
| 		schema.MinProperties = source.MinProperties | ||||
| 	} | ||||
| 	if source.Required != nil { | ||||
| 		schema.Required = source.Required | ||||
| 	} | ||||
| 	if source.AdditionalProperties != nil { | ||||
| 		schema.AdditionalProperties = source.AdditionalProperties | ||||
| 	} | ||||
| 	if source.Properties != nil { | ||||
| 		schema.Properties = source.Properties | ||||
| 	} | ||||
| 	if source.PatternProperties != nil { | ||||
| 		schema.PatternProperties = source.PatternProperties | ||||
| 	} | ||||
| 	if source.Dependencies != nil { | ||||
| 		schema.Dependencies = source.Dependencies | ||||
| 	} | ||||
| 	if source.Enumeration != nil { | ||||
| 		schema.Enumeration = source.Enumeration | ||||
| 	} | ||||
| 	if source.Type != nil { | ||||
| 		schema.Type = source.Type | ||||
| 	} | ||||
| 	if source.AllOf != nil { | ||||
| 		schema.AllOf = source.AllOf | ||||
| 	} | ||||
| 	if source.AnyOf != nil { | ||||
| 		schema.AnyOf = source.AnyOf | ||||
| 	} | ||||
| 	if source.OneOf != nil { | ||||
| 		schema.OneOf = source.OneOf | ||||
| 	} | ||||
| 	if source.Not != nil { | ||||
| 		schema.Not = source.Not | ||||
| 	} | ||||
| 	if source.Definitions != nil { | ||||
| 		schema.Definitions = source.Definitions | ||||
| 	} | ||||
| 	if source.Title != nil { | ||||
| 		schema.Title = source.Title | ||||
| 	} | ||||
| 	if source.Description != nil { | ||||
| 		schema.Description = source.Description | ||||
| 	} | ||||
| 	if source.Default != nil { | ||||
| 		schema.Default = source.Default | ||||
| 	} | ||||
| 	if source.Format != nil { | ||||
| 		schema.Format = source.Format | ||||
| 	} | ||||
| 	if source.Ref != nil { | ||||
| 		schema.Ref = source.Ref | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // TypeIs returns true if the Type of a Schema includes the specified type. | ||||
| func (schema *Schema) TypeIs(typeName string) bool { | ||||
| 	if schema.Type != nil { | ||||
| 		// the schema Type is either a string or an array of strings | ||||
| 		if schema.Type.String != nil { | ||||
| 			return (*(schema.Type.String) == typeName) | ||||
| 		} else if schema.Type.StringArray != nil { | ||||
| 			for _, n := range *(schema.Type.StringArray) { | ||||
| 				if n == typeName { | ||||
| 					return true | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // ResolveRefs resolves "$ref" elements in a Schema and its children. | ||||
| // But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, | ||||
| // the reference is kept and we expect downstream tools to separately model these | ||||
| // referenced schemas. | ||||
| func (schema *Schema) ResolveRefs() { | ||||
| 	rootSchema := schema | ||||
| 	count := 1 | ||||
| 	for count > 0 { | ||||
| 		count = 0 | ||||
| 		schema.applyToSchemas( | ||||
| 			func(schema *Schema, context string) { | ||||
| 				if schema.Ref != nil { | ||||
| 					resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) | ||||
| 					if err != nil { | ||||
| 						log.Printf("%+v", err) | ||||
| 					} else if resolvedRef.TypeIs("object") { | ||||
| 						// don't substitute for objects, we'll model the referenced schema with a class | ||||
| 					} else if context == "OneOf" { | ||||
| 						// don't substitute for references inside oneOf declarations | ||||
| 					} else if resolvedRef.OneOf != nil { | ||||
| 						// don't substitute for references that contain oneOf declarations | ||||
| 					} else if resolvedRef.AdditionalProperties != nil { | ||||
| 						// don't substitute for references that look like objects | ||||
| 					} else { | ||||
| 						schema.Ref = nil | ||||
| 						schema.CopyProperties(resolvedRef) | ||||
| 						count++ | ||||
| 					} | ||||
| 				} | ||||
| 			}, "") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // resolveJSONPointer resolves JSON pointers. | ||||
| // The current implementation is very crude and specific to OpenAPI 2.0 schemas. | ||||
| // It returns an error for any pointer that it is unable to resolve. | ||||
| func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { | ||||
| 	parts := strings.Split(ref, "#") | ||||
| 	if len(parts) == 2 { | ||||
| 		documentName := parts[0] + "#" | ||||
| 		if documentName == "#" && schema.ID != nil { | ||||
| 			documentName = *(schema.ID) | ||||
| 		} | ||||
| 		path := parts[1] | ||||
| 		document := schemas[documentName] | ||||
| 		pathParts := strings.Split(path, "/") | ||||
|  | ||||
| 		// we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases | ||||
| 		if len(pathParts) == 1 { | ||||
| 			return document, nil | ||||
| 		} else if len(pathParts) == 3 { | ||||
| 			switch pathParts[1] { | ||||
| 			case "definitions": | ||||
| 				dictionary := document.Definitions | ||||
| 				for _, pair := range *dictionary { | ||||
| 					if pair.Name == pathParts[2] { | ||||
| 						result = pair.Value | ||||
| 					} | ||||
| 				} | ||||
| 			case "properties": | ||||
| 				dictionary := document.Properties | ||||
| 				for _, pair := range *dictionary { | ||||
| 					if pair.Name == pathParts[2] { | ||||
| 						result = pair.Value | ||||
| 					} | ||||
| 				} | ||||
| 			default: | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if result == nil { | ||||
| 		return nil, fmt.Errorf("unresolved pointer: %+v", ref) | ||||
| 	} | ||||
| 	return result, nil | ||||
| } | ||||
|  | ||||
| // ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. | ||||
| func (schema *Schema) ResolveAllOfs() { | ||||
| 	schema.applyToSchemas( | ||||
| 		func(schema *Schema, context string) { | ||||
| 			if schema.AllOf != nil { | ||||
| 				for _, allOf := range *(schema.AllOf) { | ||||
| 					schema.CopyProperties(allOf) | ||||
| 				} | ||||
| 				schema.AllOf = nil | ||||
| 			} | ||||
| 		}, "resolveAllOfs") | ||||
| } | ||||
|  | ||||
| // ResolveAnyOfs replaces all "anyOf" elements with "oneOf". | ||||
| func (schema *Schema) ResolveAnyOfs() { | ||||
| 	schema.applyToSchemas( | ||||
| 		func(schema *Schema, context string) { | ||||
| 			if schema.AnyOf != nil { | ||||
| 				schema.OneOf = schema.AnyOf | ||||
| 				schema.AnyOf = nil | ||||
| 			} | ||||
| 		}, "resolveAnyOfs") | ||||
| } | ||||
|  | ||||
| // return a pointer to a copy of a passed-in string | ||||
| func stringptr(input string) (output *string) { | ||||
| 	return &input | ||||
| } | ||||
|  | ||||
| // CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition | ||||
| func (schema *Schema) CopyOfficialSchemaProperty(name string) { | ||||
| 	*schema.Properties = append(*schema.Properties, | ||||
| 		NewNamedSchema(name, | ||||
| 			&Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) | ||||
| } | ||||
|  | ||||
| // CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition | ||||
| func (schema *Schema) CopyOfficialSchemaProperties(names []string) { | ||||
| 	for _, name := range names { | ||||
| 		schema.CopyOfficialSchemaProperty(name) | ||||
| 	} | ||||
| } | ||||
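
Taken together, ResolveRefs, ResolveAllOfs, and ResolveAnyOfs form the package's normalization pipeline. A hedged sketch of the usual call order (the input file name is only an example):

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	// Load a schema (see reader.go below), then normalize it: inline $refs
	// where safe, merge allOf members into their parents, and rewrite anyOf
	// as oneOf.
	schema, err := jsonschema.NewSchemaFromFile("openapi-2.0.json")
	if err != nil {
		log.Fatal(err)
	}
	schema.ResolveRefs()
	schema.ResolveAllOfs()
	schema.ResolveAnyOfs()
	fmt.Println(schema.JSONString())
}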
							
								
								
									
vendor/github.com/googleapis/gnostic/jsonschema/reader.go (442 lines; generated, vendored)
							| @ -1,442 +0,0 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| //go:generate go run generate-base.go | ||||
|  | ||||
| package jsonschema | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| // This is a global map of all known Schemas. | ||||
| // It is initialized when the first Schema is created and inserted. | ||||
| var schemas map[string]*Schema | ||||
|  | ||||
| // NewBaseSchema builds a schema object from an embedded json representation. | ||||
| func NewBaseSchema() (schema *Schema, err error) { | ||||
| 	b, err := baseSchemaBytes() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var node yaml.Node | ||||
| 	err = yaml.Unmarshal(b, &node) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return NewSchemaFromObject(&node), nil | ||||
| } | ||||
|  | ||||
| // NewSchemaFromFile reads a schema from a file. | ||||
| // Currently this assumes that schemas are stored in the source distribution of this project. | ||||
| func NewSchemaFromFile(filename string) (schema *Schema, err error) { | ||||
| 	file, err := ioutil.ReadFile(filename) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var node yaml.Node | ||||
| 	err = yaml.Unmarshal(file, &node) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return NewSchemaFromObject(&node), nil | ||||
| } | ||||
|  | ||||
| // NewSchemaFromObject constructs a schema from a parsed JSON object. | ||||
| // Due to the complexity of the schema representation, this is a | ||||
| // custom reader and not the standard Go JSON reader (encoding/json). | ||||
| func NewSchemaFromObject(jsonData *yaml.Node) *Schema { | ||||
| 	switch jsonData.Kind { | ||||
| 	case yaml.DocumentNode: | ||||
| 		return NewSchemaFromObject(jsonData.Content[0]) | ||||
| 	case yaml.MappingNode: | ||||
| 		schema := &Schema{} | ||||
|  | ||||
| 		for i := 0; i < len(jsonData.Content); i += 2 { | ||||
| 			k := jsonData.Content[i].Value | ||||
| 			v := jsonData.Content[i+1] | ||||
|  | ||||
| 			switch k { | ||||
| 			case "$schema": | ||||
| 				schema.Schema = schema.stringValue(v) | ||||
| 			case "id": | ||||
| 				schema.ID = schema.stringValue(v) | ||||
|  | ||||
| 			case "multipleOf": | ||||
| 				schema.MultipleOf = schema.numberValue(v) | ||||
| 			case "maximum": | ||||
| 				schema.Maximum = schema.numberValue(v) | ||||
| 			case "exclusiveMaximum": | ||||
| 				schema.ExclusiveMaximum = schema.boolValue(v) | ||||
| 			case "minimum": | ||||
| 				schema.Minimum = schema.numberValue(v) | ||||
| 			case "exclusiveMinimum": | ||||
| 				schema.ExclusiveMinimum = schema.boolValue(v) | ||||
|  | ||||
| 			case "maxLength": | ||||
| 				schema.MaxLength = schema.intValue(v) | ||||
| 			case "minLength": | ||||
| 				schema.MinLength = schema.intValue(v) | ||||
| 			case "pattern": | ||||
| 				schema.Pattern = schema.stringValue(v) | ||||
|  | ||||
| 			case "additionalItems": | ||||
| 				schema.AdditionalItems = schema.schemaOrBooleanValue(v) | ||||
| 			case "items": | ||||
| 				schema.Items = schema.schemaOrSchemaArrayValue(v) | ||||
| 			case "maxItems": | ||||
| 				schema.MaxItems = schema.intValue(v) | ||||
| 			case "minItems": | ||||
| 				schema.MinItems = schema.intValue(v) | ||||
| 			case "uniqueItems": | ||||
| 				schema.UniqueItems = schema.boolValue(v) | ||||
|  | ||||
| 			case "maxProperties": | ||||
| 				schema.MaxProperties = schema.intValue(v) | ||||
| 			case "minProperties": | ||||
| 				schema.MinProperties = schema.intValue(v) | ||||
| 			case "required": | ||||
| 				schema.Required = schema.arrayOfStringsValue(v) | ||||
| 			case "additionalProperties": | ||||
| 				schema.AdditionalProperties = schema.schemaOrBooleanValue(v) | ||||
| 			case "properties": | ||||
| 				schema.Properties = schema.mapOfSchemasValue(v) | ||||
| 			case "patternProperties": | ||||
| 				schema.PatternProperties = schema.mapOfSchemasValue(v) | ||||
| 			case "dependencies": | ||||
| 				schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) | ||||
|  | ||||
| 			case "enum": | ||||
| 				schema.Enumeration = schema.arrayOfEnumValuesValue(v) | ||||
|  | ||||
| 			case "type": | ||||
| 				schema.Type = schema.stringOrStringArrayValue(v) | ||||
| 			case "allOf": | ||||
| 				schema.AllOf = schema.arrayOfSchemasValue(v) | ||||
| 			case "anyOf": | ||||
| 				schema.AnyOf = schema.arrayOfSchemasValue(v) | ||||
| 			case "oneOf": | ||||
| 				schema.OneOf = schema.arrayOfSchemasValue(v) | ||||
| 			case "not": | ||||
| 				schema.Not = NewSchemaFromObject(v) | ||||
| 			case "definitions": | ||||
| 				schema.Definitions = schema.mapOfSchemasValue(v) | ||||
|  | ||||
| 			case "title": | ||||
| 				schema.Title = schema.stringValue(v) | ||||
| 			case "description": | ||||
| 				schema.Description = schema.stringValue(v) | ||||
|  | ||||
| 			case "default": | ||||
| 				schema.Default = v | ||||
|  | ||||
| 			case "format": | ||||
| 				schema.Format = schema.stringValue(v) | ||||
| 			case "$ref": | ||||
| 				schema.Ref = schema.stringValue(v) | ||||
| 			default: | ||||
| 				fmt.Printf("UNSUPPORTED (%s)\n", k) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		// insert schema in global map | ||||
| 		if schema.ID != nil { | ||||
| 			if schemas == nil { | ||||
| 				schemas = make(map[string]*Schema, 0) | ||||
| 			} | ||||
| 			schemas[*(schema.ID)] = schema | ||||
| 		} | ||||
| 		return schema | ||||
|  | ||||
| 	default: | ||||
| 		fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // | ||||
| // BUILDERS | ||||
| // The following methods build elements of Schemas from yaml.Node values. | ||||
| // Each returns nil if it is unable to build the desired element. | ||||
| // | ||||
|  | ||||
| // Gets the string value of a yaml.Node if possible. | ||||
| func (schema *Schema) stringValue(v *yaml.Node) *string { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		return &v.Value | ||||
| 	default: | ||||
| 		fmt.Printf("stringValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets the numeric value of a yaml.Node if possible. | ||||
| func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { | ||||
| 	number := &SchemaNumber{} | ||||
| 	switch v.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		switch v.Tag { | ||||
| 		case "!!float": | ||||
| 			v2, _ := strconv.ParseFloat(v.Value, 64) | ||||
| 			number.Float = &v2 | ||||
| 			return number | ||||
| 		case "!!int": | ||||
| 			v2, _ := strconv.ParseInt(v.Value, 10, 64) | ||||
| 			number.Integer = &v2 | ||||
| 			return number | ||||
| 		default: | ||||
| 			fmt.Printf("numberValue: unexpected node %+v\n", v) | ||||
| 		} | ||||
| 	default: | ||||
| 		fmt.Printf("numberValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets the integer value of a yaml.Node if possible. | ||||
| func (schema *Schema) intValue(v *yaml.Node) *int64 { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		switch v.Tag { | ||||
| 		case "!!float": | ||||
| 			v2, _ := strconv.ParseFloat(v.Value, 64) | ||||
| 			v3 := int64(v2) | ||||
| 			return &v3 | ||||
| 		case "!!int": | ||||
| 			v2, _ := strconv.ParseInt(v.Value, 10, 64) | ||||
| 			return &v2 | ||||
| 		default: | ||||
| 			fmt.Printf("intValue: unexpected node %+v\n", v) | ||||
| 		} | ||||
| 	default: | ||||
| 		fmt.Printf("intValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets the bool value of a yaml.Node if possible. | ||||
| func (schema *Schema) boolValue(v *yaml.Node) *bool { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		switch v.Tag { | ||||
| 		case "!!bool": | ||||
| 			v2, _ := strconv.ParseBool(v.Value) | ||||
| 			return &v2 | ||||
| 		default: | ||||
| 			fmt.Printf("boolValue: unexpected node %+v\n", v) | ||||
| 		} | ||||
| 	default: | ||||
| 		fmt.Printf("boolValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets a map of Schemas from a yaml.Node if possible. | ||||
| func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.MappingNode: | ||||
| 		m := make([]*NamedSchema, 0) | ||||
| 		for i := 0; i < len(v.Content); i += 2 { | ||||
| 			k2 := v.Content[i].Value | ||||
| 			v2 := v.Content[i+1] | ||||
| 			pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} | ||||
| 			m = append(m, pair) | ||||
| 		} | ||||
| 		return &m | ||||
| 	default: | ||||
| 		fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets an array of Schemas from a yaml.Node if possible. | ||||
| func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.SequenceNode: | ||||
| 		m := make([]*Schema, 0) | ||||
| 		for _, v2 := range v.Content { | ||||
| 			switch v2.Kind { | ||||
| 			case yaml.MappingNode: | ||||
| 				s := NewSchemaFromObject(v2) | ||||
| 				m = append(m, s) | ||||
| 			default: | ||||
| 				fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) | ||||
| 			} | ||||
| 		} | ||||
| 		return &m | ||||
| 	case yaml.MappingNode: | ||||
| 		m := make([]*Schema, 0) | ||||
| 		s := NewSchemaFromObject(v) | ||||
| 		m = append(m, s) | ||||
| 		return &m | ||||
| 	default: | ||||
| 		fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets a Schema or an array of Schemas from a yaml.Node if possible. | ||||
| func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.SequenceNode: | ||||
| 		m := make([]*Schema, 0) | ||||
| 		for _, v2 := range v.Content { | ||||
| 			switch v2.Kind { | ||||
| 			case yaml.MappingNode: | ||||
| 				s := NewSchemaFromObject(v2) | ||||
| 				m = append(m, s) | ||||
| 			default: | ||||
| 				fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) | ||||
| 			} | ||||
| 		} | ||||
| 		return &SchemaOrSchemaArray{SchemaArray: &m} | ||||
| 	case yaml.MappingNode: | ||||
| 		s := NewSchemaFromObject(v) | ||||
| 		return &SchemaOrSchemaArray{Schema: s} | ||||
| 	default: | ||||
| 		fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets an array of strings from a yaml.Node if possible. | ||||
| func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		a := []string{v.Value} | ||||
| 		return &a | ||||
| 	case yaml.SequenceNode: | ||||
| 		a := make([]string, 0) | ||||
| 		for _, v2 := range v.Content { | ||||
| 			switch v2.Kind { | ||||
| 			case yaml.ScalarNode: | ||||
| 				a = append(a, v2.Value) | ||||
| 			default: | ||||
| 				fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) | ||||
| 			} | ||||
| 		} | ||||
| 		return &a | ||||
| 	default: | ||||
| 		fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets a string or an array of strings from a yaml.Node if possible. | ||||
| func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { | ||||
| 	switch v.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		s := &StringOrStringArray{} | ||||
| 		s.String = &v.Value | ||||
| 		return s | ||||
| 	case yaml.SequenceNode: | ||||
| 		a := make([]string, 0) | ||||
| 		for _, v2 := range v.Content { | ||||
| 			switch v2.Kind { | ||||
| 			case yaml.ScalarNode: | ||||
| 				a = append(a, v2.Value) | ||||
| 			default: | ||||
| 				fmt.Printf("stringOrStringArrayValue: unexpected node %+v\n", v2) | ||||
| 			} | ||||
| 		} | ||||
| 		s := &StringOrStringArray{} | ||||
| 		s.StringArray = &a | ||||
| 		return s | ||||
| 	default: | ||||
| 		fmt.Printf("stringOrStringArrayValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gets an array of enum values from a yaml.Node if possible. | ||||
| func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { | ||||
| 	a := make([]SchemaEnumValue, 0) | ||||
| 	switch v.Kind { | ||||
| 	case yaml.SequenceNode: | ||||
| 		for _, v2 := range v.Content { | ||||
| 			switch v2.Kind { | ||||
| 			case yaml.ScalarNode: | ||||
| 				switch v2.Tag { | ||||
| 				case "!!str": | ||||
| 					a = append(a, SchemaEnumValue{String: &v2.Value}) | ||||
| 				case "!!bool": | ||||
| 					v3, _ := strconv.ParseBool(v2.Value) | ||||
| 					a = append(a, SchemaEnumValue{Bool: &v3}) | ||||
| 				default: | ||||
| 					fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) | ||||
| 				} | ||||
| 			default: | ||||
| 				fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) | ||||
| 			} | ||||
| 		} | ||||
| 	default: | ||||
| 		fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return &a | ||||
| } | ||||
|  | ||||
| // Gets a map of schemas or string arrays from a yaml.Node if possible. | ||||
| func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { | ||||
| 	m := make([]*NamedSchemaOrStringArray, 0) | ||||
| 	switch v.Kind { | ||||
| 	case yaml.MappingNode: | ||||
| 		for i := 0; i < len(v.Content); i += 2 { | ||||
| 			k2 := v.Content[i].Value | ||||
| 			v2 := v.Content[i+1] | ||||
| 			switch v2.Kind { | ||||
| 			case yaml.SequenceNode: | ||||
| 				a := make([]string, 0) | ||||
| 				for _, v3 := range v2.Content { | ||||
| 					switch v3.Kind { | ||||
| 					case yaml.ScalarNode: | ||||
| 						a = append(a, v3.Value) | ||||
| 					default: | ||||
| 						fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) | ||||
| 					} | ||||
| 				} | ||||
| 				s := &SchemaOrStringArray{} | ||||
| 				s.StringArray = &a | ||||
| 				pair := &NamedSchemaOrStringArray{Name: k2, Value: s} | ||||
| 				m = append(m, pair) | ||||
| 			default: | ||||
| 				fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) | ||||
| 			} | ||||
| 		} | ||||
| 	default: | ||||
| 		fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return &m | ||||
| } | ||||
|  | ||||
| // Gets a schema or a boolean value from a yaml.Node if possible. | ||||
| func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { | ||||
| 	schemaOrBoolean := &SchemaOrBoolean{} | ||||
| 	switch v.Kind { | ||||
| 	case yaml.ScalarNode: | ||||
| 		v2, _ := strconv.ParseBool(v.Value) | ||||
| 		schemaOrBoolean.Boolean = &v2 | ||||
| 	case yaml.MappingNode: | ||||
| 		schemaOrBoolean.Schema = NewSchemaFromObject(v) | ||||
| 	default: | ||||
| 		fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) | ||||
| 	} | ||||
| 	return schemaOrBoolean | ||||
| } | ||||
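
Since JSON is a subset of YAML, the reader accepts either syntax through gopkg.in/yaml.v3. A small sketch of feeding NewSchemaFromObject directly (the sample document is made up):

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema"
	"gopkg.in/yaml.v3"
)

func main() {
	doc := []byte(`{"id": "http://example.com/sample#", "type": "object", "required": ["name"]}`)

	var node yaml.Node
	if err := yaml.Unmarshal(doc, &node); err != nil {
		log.Fatal(err)
	}

	// The "id" key also registers the schema in the package-level map that
	// resolveJSONPointer consults.
	schema := jsonschema.NewSchemaFromObject(&node)
	fmt.Println(*schema.Type.String, *schema.Required) // object [name]
}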
							
								
								
									
vendor/github.com/googleapis/gnostic/jsonschema/schema.json (150 lines; generated, vendored)
							| @ -1,150 +0,0 @@ | ||||
| { | ||||
|     "id": "http://json-schema.org/draft-04/schema#", | ||||
|     "$schema": "http://json-schema.org/draft-04/schema#", | ||||
|     "description": "Core schema meta-schema", | ||||
|     "definitions": { | ||||
|         "schemaArray": { | ||||
|             "type": "array", | ||||
|             "minItems": 1, | ||||
|             "items": { "$ref": "#" } | ||||
|         }, | ||||
|         "positiveInteger": { | ||||
|             "type": "integer", | ||||
|             "minimum": 0 | ||||
|         }, | ||||
|         "positiveIntegerDefault0": { | ||||
|             "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] | ||||
|         }, | ||||
|         "simpleTypes": { | ||||
|             "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] | ||||
|         }, | ||||
|         "stringArray": { | ||||
|             "type": "array", | ||||
|             "items": { "type": "string" }, | ||||
|             "minItems": 1, | ||||
|             "uniqueItems": true | ||||
|         } | ||||
|     }, | ||||
|     "type": "object", | ||||
|     "properties": { | ||||
|         "id": { | ||||
|             "type": "string", | ||||
|             "format": "uri" | ||||
|         }, | ||||
|         "$schema": { | ||||
|             "type": "string", | ||||
|             "format": "uri" | ||||
|         }, | ||||
|         "title": { | ||||
|             "type": "string" | ||||
|         }, | ||||
|         "description": { | ||||
|             "type": "string" | ||||
|         }, | ||||
|         "default": {}, | ||||
|         "multipleOf": { | ||||
|             "type": "number", | ||||
|             "minimum": 0, | ||||
|             "exclusiveMinimum": true | ||||
|         }, | ||||
|         "maximum": { | ||||
|             "type": "number" | ||||
|         }, | ||||
|         "exclusiveMaximum": { | ||||
|             "type": "boolean", | ||||
|             "default": false | ||||
|         }, | ||||
|         "minimum": { | ||||
|             "type": "number" | ||||
|         }, | ||||
|         "exclusiveMinimum": { | ||||
|             "type": "boolean", | ||||
|             "default": false | ||||
|         }, | ||||
|         "maxLength": { "$ref": "#/definitions/positiveInteger" }, | ||||
|         "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, | ||||
|         "pattern": { | ||||
|             "type": "string", | ||||
|             "format": "regex" | ||||
|         }, | ||||
|         "additionalItems": { | ||||
|             "anyOf": [ | ||||
|                 { "type": "boolean" }, | ||||
|                 { "$ref": "#" } | ||||
|             ], | ||||
|             "default": {} | ||||
|         }, | ||||
|         "items": { | ||||
|             "anyOf": [ | ||||
|                 { "$ref": "#" }, | ||||
|                 { "$ref": "#/definitions/schemaArray" } | ||||
|             ], | ||||
|             "default": {} | ||||
|         }, | ||||
|         "maxItems": { "$ref": "#/definitions/positiveInteger" }, | ||||
|         "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, | ||||
|         "uniqueItems": { | ||||
|             "type": "boolean", | ||||
|             "default": false | ||||
|         }, | ||||
|         "maxProperties": { "$ref": "#/definitions/positiveInteger" }, | ||||
|         "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, | ||||
|         "required": { "$ref": "#/definitions/stringArray" }, | ||||
|         "additionalProperties": { | ||||
|             "anyOf": [ | ||||
|                 { "type": "boolean" }, | ||||
|                 { "$ref": "#" } | ||||
|             ], | ||||
|             "default": {} | ||||
|         }, | ||||
|         "definitions": { | ||||
|             "type": "object", | ||||
|             "additionalProperties": { "$ref": "#" }, | ||||
|             "default": {} | ||||
|         }, | ||||
|         "properties": { | ||||
|             "type": "object", | ||||
|             "additionalProperties": { "$ref": "#" }, | ||||
|             "default": {} | ||||
|         }, | ||||
|         "patternProperties": { | ||||
|             "type": "object", | ||||
|             "additionalProperties": { "$ref": "#" }, | ||||
|             "default": {} | ||||
|         }, | ||||
|         "dependencies": { | ||||
|             "type": "object", | ||||
|             "additionalProperties": { | ||||
|                 "anyOf": [ | ||||
|                     { "$ref": "#" }, | ||||
|                     { "$ref": "#/definitions/stringArray" } | ||||
|                 ] | ||||
|             } | ||||
|         }, | ||||
|         "enum": { | ||||
|             "type": "array", | ||||
|             "minItems": 1, | ||||
|             "uniqueItems": true | ||||
|         }, | ||||
|         "type": { | ||||
|             "anyOf": [ | ||||
|                 { "$ref": "#/definitions/simpleTypes" }, | ||||
|                 { | ||||
|                     "type": "array", | ||||
|                     "items": { "$ref": "#/definitions/simpleTypes" }, | ||||
|                     "minItems": 1, | ||||
|                     "uniqueItems": true | ||||
|                 } | ||||
|             ] | ||||
|         }, | ||||
|         "allOf": { "$ref": "#/definitions/schemaArray" }, | ||||
|         "anyOf": { "$ref": "#/definitions/schemaArray" }, | ||||
|         "oneOf": { "$ref": "#/definitions/schemaArray" }, | ||||
|         "not": { "$ref": "#" } | ||||
|     }, | ||||
|     "dependencies": { | ||||
|         "exclusiveMaximum": [ "maximum" ], | ||||
|         "exclusiveMinimum": [ "minimum" ] | ||||
|     }, | ||||
|     "default": {} | ||||
| } | ||||
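
This is the JSON Schema draft-04 meta-schema; it appears to be the source of the embedded baseSchemaBytes that NewBaseSchema (reader.go above) unmarshals. A sketch of pulling one of its "definitions" back out:

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	base, err := jsonschema.NewBaseSchema()
	if err != nil {
		log.Fatal(err)
	}
	// "positiveInteger" is declared in the definitions block above.
	if def := base.DefinitionWithName("positiveInteger"); def != nil {
		fmt.Println(def.JSONString())
	}
}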
							
								
								
									
vendor/github.com/googleapis/gnostic/jsonschema/writer.go (369 lines; generated, vendored)
							| @ -1,369 +0,0 @@ | ||||
| // Copyright 2017 Google LLC. All Rights Reserved. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jsonschema | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"gopkg.in/yaml.v3" | ||||
| ) | ||||
|  | ||||
| const indentation = "  " | ||||
|  | ||||
| func renderMappingNode(node *yaml.Node, indent string) (result string) { | ||||
| 	result = "{\n" | ||||
| 	innerIndent := indent + indentation | ||||
| 	for i := 0; i < len(node.Content); i += 2 { | ||||
| 		// first print the key | ||||
| 		key := node.Content[i].Value | ||||
| 		result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) | ||||
| 		// then the value | ||||
| 		value := node.Content[i+1] | ||||
| 		switch value.Kind { | ||||
| 		case yaml.ScalarNode: | ||||
| 			result += "\"" + value.Value + "\"" | ||||
| 		case yaml.MappingNode: | ||||
| 			result += renderMappingNode(value, innerIndent) | ||||
| 		case yaml.SequenceNode: | ||||
| 			result += renderSequenceNode(value, innerIndent) | ||||
| 		default: | ||||
| 			result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) | ||||
| 		} | ||||
| 		if i < len(node.Content)-2 { | ||||
| 			result += "," | ||||
| 		} | ||||
| 		result += "\n" | ||||
| 	} | ||||
|  | ||||
| 	result += indent + "}" | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func renderSequenceNode(node *yaml.Node, indent string) (result string) { | ||||
| 	result = "[\n" | ||||
| 	innerIndent := indent + indentation | ||||
| 	for i := 0; i < len(node.Content); i++ { | ||||
| 		item := node.Content[i] | ||||
| 		switch item.Kind { | ||||
| 		case yaml.ScalarNode: | ||||
| 			result += innerIndent + "\"" + item.Value + "\"" | ||||
| 		case yaml.MappingNode: | ||||
| 			result += innerIndent + renderMappingNode(item, innerIndent) | ||||
| 		default: | ||||
| 			result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) | ||||
| 		} | ||||
| 		if i < len(node.Content)-1 { | ||||
| 			result += "," | ||||
| 		} | ||||
| 		result += "\n" | ||||
| 	} | ||||
| 	result += indent + "]" | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func renderStringArray(array []string, indent string) (result string) { | ||||
| 	result = "[\n" | ||||
| 	innerIndent := indent + indentation | ||||
| 	for i, item := range array { | ||||
| 		result += innerIndent + "\"" + item + "\"" | ||||
| 		if i < len(array)-1 { | ||||
| 			result += "," | ||||
| 		} | ||||
| 		result += "\n" | ||||
| 	} | ||||
| 	result += indent + "]" | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // Render renders a yaml.Node as JSON | ||||
| func Render(node *yaml.Node) string { | ||||
| 	if node.Kind == yaml.DocumentNode { | ||||
| 		if len(node.Content) == 1 { | ||||
| 			return Render(node.Content[0]) | ||||
| 		} | ||||
| 	} else if node.Kind == yaml.MappingNode { | ||||
| 		return renderMappingNode(node, "") + "\n" | ||||
| 	} else if node.Kind == yaml.SequenceNode { | ||||
| 		return renderSequenceNode(node, "") + "\n" | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (object *SchemaNumber) nodeValue() *yaml.Node { | ||||
| 	if object.Integer != nil { | ||||
| 		return nodeForInt64(*object.Integer) | ||||
| 	} else if object.Float != nil { | ||||
| 		return nodeForFloat64(*object.Float) | ||||
| 	} else { | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (object *SchemaOrBoolean) nodeValue() *yaml.Node { | ||||
| 	if object.Schema != nil { | ||||
| 		return object.Schema.nodeValue() | ||||
| 	} else if object.Boolean != nil { | ||||
| 		return nodeForBoolean(*object.Boolean) | ||||
| 	} else { | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func nodeForStringArray(array []string) *yaml.Node { | ||||
| 	content := make([]*yaml.Node, 0) | ||||
| 	for _, item := range array { | ||||
| 		content = append(content, nodeForString(item)) | ||||
| 	} | ||||
| 	return nodeForSequence(content) | ||||
| } | ||||
|  | ||||
| func nodeForSchemaArray(array []*Schema) *yaml.Node { | ||||
| 	content := make([]*yaml.Node, 0) | ||||
| 	for _, item := range array { | ||||
| 		content = append(content, item.nodeValue()) | ||||
| 	} | ||||
| 	return nodeForSequence(content) | ||||
| } | ||||
|  | ||||
| func (object *StringOrStringArray) nodeValue() *yaml.Node { | ||||
| 	if object.String != nil { | ||||
| 		return nodeForString(*object.String) | ||||
| 	} else if object.StringArray != nil { | ||||
| 		return nodeForStringArray(*(object.StringArray)) | ||||
| 	} else { | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (object *SchemaOrStringArray) nodeValue() *yaml.Node { | ||||
| 	if object.Schema != nil { | ||||
| 		return object.Schema.nodeValue() | ||||
| 	} else if object.StringArray != nil { | ||||
| 		return nodeForStringArray(*(object.StringArray)) | ||||
| 	} else { | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { | ||||
| 	if object.Schema != nil { | ||||
| 		return object.Schema.nodeValue() | ||||
| 	} else if object.SchemaArray != nil { | ||||
| 		return nodeForSchemaArray(*(object.SchemaArray)) | ||||
| 	} else { | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (object *SchemaEnumValue) nodeValue() *yaml.Node { | ||||
| 	if object.String != nil { | ||||
| 		return nodeForString(*object.String) | ||||
| 	} else if object.Bool != nil { | ||||
| 		return nodeForBoolean(*object.Bool) | ||||
| 	} else { | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { | ||||
| 	content := make([]*yaml.Node, 0) | ||||
| 	for _, pair := range *(array) { | ||||
| 		content = appendPair(content, pair.Name, pair.Value.nodeValue()) | ||||
| 	} | ||||
| 	return nodeForMapping(content) | ||||
| } | ||||
|  | ||||
| func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { | ||||
| 	content := make([]*yaml.Node, 0) | ||||
| 	for _, pair := range *(array) { | ||||
| 		content = appendPair(content, pair.Name, pair.Value.nodeValue()) | ||||
| 	} | ||||
| 	return nodeForMapping(content) | ||||
| } | ||||
|  | ||||
| func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { | ||||
| 	content := make([]*yaml.Node, 0) | ||||
| 	for _, item := range *array { | ||||
| 		content = append(content, item.nodeValue()) | ||||
| 	} | ||||
| 	return nodeForSequence(content) | ||||
| } | ||||
|  | ||||
| func nodeForMapping(content []*yaml.Node) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:    yaml.MappingNode, | ||||
| 		Content: content, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func nodeForSequence(content []*yaml.Node) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:    yaml.SequenceNode, | ||||
| 		Content: content, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func nodeForString(value string) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!str", | ||||
| 		Value: value, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func nodeForBoolean(value bool) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!bool", | ||||
| 		Value: fmt.Sprintf("%t", value), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func nodeForInt64(value int64) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!int", | ||||
| 		Value: fmt.Sprintf("%d", value), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func nodeForFloat64(value float64) *yaml.Node { | ||||
| 	return &yaml.Node{ | ||||
| 		Kind:  yaml.ScalarNode, | ||||
| 		Tag:   "!!float", | ||||
| 		Value: fmt.Sprintf("%f", value), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { | ||||
| 	nodes = append(nodes, nodeForString(name)) | ||||
| 	nodes = append(nodes, value) | ||||
| 	return nodes | ||||
| } | ||||
|  | ||||
| func (schema *Schema) nodeValue() *yaml.Node { | ||||
| 	n := &yaml.Node{Kind: yaml.MappingNode} | ||||
| 	content := make([]*yaml.Node, 0) | ||||
| 	if schema.Title != nil { | ||||
| 		content = appendPair(content, "title", nodeForString(*schema.Title)) | ||||
| 	} | ||||
| 	if schema.ID != nil { | ||||
| 		content = appendPair(content, "id", nodeForString(*schema.ID)) | ||||
| 	} | ||||
| 	if schema.Schema != nil { | ||||
| 		content = appendPair(content, "$schema", nodeForString(*schema.Schema)) | ||||
| 	} | ||||
| 	if schema.Type != nil { | ||||
| 		content = appendPair(content, "type", schema.Type.nodeValue()) | ||||
| 	} | ||||
| 	if schema.Items != nil { | ||||
| 		content = appendPair(content, "items", schema.Items.nodeValue()) | ||||
| 	} | ||||
| 	if schema.Description != nil { | ||||
| 		content = appendPair(content, "description", nodeForString(*schema.Description)) | ||||
| 	} | ||||
| 	if schema.Required != nil { | ||||
| 		content = appendPair(content, "required", nodeForStringArray(*schema.Required)) | ||||
| 	} | ||||
| 	if schema.AdditionalProperties != nil { | ||||
| 		content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) | ||||
| 	} | ||||
| 	if schema.PatternProperties != nil { | ||||
| 		content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) | ||||
| 	} | ||||
| 	if schema.Properties != nil { | ||||
| 		content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) | ||||
| 	} | ||||
| 	if schema.Dependencies != nil { | ||||
| 		content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) | ||||
| 	} | ||||
| 	if schema.Ref != nil { | ||||
| 		content = appendPair(content, "$ref", nodeForString(*schema.Ref)) | ||||
| 	} | ||||
| 	if schema.MultipleOf != nil { | ||||
| 		content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) | ||||
| 	} | ||||
| 	if schema.Maximum != nil { | ||||
| 		content = appendPair(content, "maximum", schema.Maximum.nodeValue()) | ||||
| 	} | ||||
| 	if schema.ExclusiveMaximum != nil { | ||||
| 		content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) | ||||
| 	} | ||||
| 	if schema.Minimum != nil { | ||||
| 		content = appendPair(content, "minimum", schema.Minimum.nodeValue()) | ||||
| 	} | ||||
| 	if schema.ExclusiveMinimum != nil { | ||||
| 		content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) | ||||
| 	} | ||||
| 	if schema.MaxLength != nil { | ||||
| 		content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) | ||||
| 	} | ||||
| 	if schema.MinLength != nil { | ||||
| 		content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) | ||||
| 	} | ||||
| 	if schema.Pattern != nil { | ||||
| 		content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) | ||||
| 	} | ||||
| 	if schema.AdditionalItems != nil { | ||||
| 		content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) | ||||
| 	} | ||||
| 	if schema.MaxItems != nil { | ||||
| 		content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) | ||||
| 	} | ||||
| 	if schema.MinItems != nil { | ||||
| 		content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) | ||||
| 	} | ||||
| 	if schema.UniqueItems != nil { | ||||
| 		content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) | ||||
| 	} | ||||
| 	if schema.MaxProperties != nil { | ||||
| 		content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) | ||||
| 	} | ||||
| 	if schema.MinProperties != nil { | ||||
| 		content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) | ||||
| 	} | ||||
| 	if schema.Enumeration != nil { | ||||
| 		content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) | ||||
| 	} | ||||
| 	if schema.AllOf != nil { | ||||
| 		content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) | ||||
| 	} | ||||
| 	if schema.AnyOf != nil { | ||||
| 		content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) | ||||
| 	} | ||||
| 	if schema.OneOf != nil { | ||||
| 		content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) | ||||
| 	} | ||||
| 	if schema.Not != nil { | ||||
| 		content = appendPair(content, "not", schema.Not.nodeValue()) | ||||
| 	} | ||||
| 	if schema.Definitions != nil { | ||||
| 		content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) | ||||
| 	} | ||||
| 	if schema.Default != nil { | ||||
| 		// m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) | ||||
| 	} | ||||
| 	if schema.Format != nil { | ||||
| 		content = appendPair(content, "format", nodeForString(*schema.Format)) | ||||
| 	} | ||||
| 	n.Content = content | ||||
| 	return n | ||||
| } | ||||
|  | ||||
| // JSONString returns a json representation of a schema. | ||||
| func (schema *Schema) JSONString() string { | ||||
| 	node := schema.nodeValue() | ||||
| 	return Render(node) | ||||
| } | ||||
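
A short sketch of Render on a hand-built yaml.Node tree; as renderMappingNode above shows, every scalar, including numbers and booleans, is emitted as a quoted string:

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema"
	"gopkg.in/yaml.v3"
)

func main() {
	var node yaml.Node
	if err := yaml.Unmarshal([]byte("title: sample\ncount: 3\ntags: [a, b]"), &node); err != nil {
		log.Fatal(err)
	}
	fmt.Print(jsonschema.Render(&node))
	// Expected shape (note the quoted "3"):
	// {
	//   "title": "sample",
	//   "count": "3",
	//   "tags": [
	//     "a",
	//     "b"
	//   ]
	// }
}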
							
								
								
									
vendor/github.com/modern-go/reflect2/safe_slice.go (92 lines; generated, vendored)
							| @ -1,92 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type safeSliceType struct { | ||||
| 	safeType | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) { | ||||
| 	val := reflect.ValueOf(obj).Elem() | ||||
| 	elem := reflect.ValueOf(value).Elem() | ||||
| 	val.Index(index).Set(elem) | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} { | ||||
| 	val := reflect.ValueOf(obj).Elem() | ||||
| 	elem := val.Index(index) | ||||
| 	ptr := reflect.New(elem.Type()) | ||||
| 	ptr.Elem().Set(elem) | ||||
| 	return ptr.Interface() | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} { | ||||
| 	val := reflect.MakeSlice(type2.Type, length, cap) | ||||
| 	ptr := reflect.New(val.Type()) | ||||
| 	ptr.Elem().Set(val) | ||||
| 	return ptr.Interface() | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) Grow(obj interface{}, newLength int) { | ||||
| 	oldCap := type2.Cap(obj) | ||||
| 	oldSlice := reflect.ValueOf(obj).Elem() | ||||
| 	delta := newLength - oldCap | ||||
| 	deltaVals := make([]reflect.Value, delta) | ||||
| 	newSlice := reflect.Append(oldSlice, deltaVals...) | ||||
| 	oldSlice.Set(newSlice) | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) { | ||||
| 	val := reflect.ValueOf(obj).Elem() | ||||
| 	elemVal := reflect.ValueOf(elem).Elem() | ||||
| 	newVal := reflect.Append(val, elemVal) | ||||
| 	val.Set(newVal) | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) SetNil(obj interface{}) { | ||||
| 	val := reflect.ValueOf(obj).Elem() | ||||
| 	val.Set(reflect.Zero(val.Type())) | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) LengthOf(obj interface{}) int { | ||||
| 	return reflect.ValueOf(obj).Elem().Len() | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) Cap(obj interface{}) int { | ||||
| 	return reflect.ValueOf(obj).Elem().Cap() | ||||
| } | ||||
|  | ||||
| func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
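
safeSliceType is the reflection-only backend that reflect2 selects when its unsafe path is disabled. The sketch below assumes the package-level ConfigSafe entry point and the SliceType interface, which live in files not shown in this diff; note that every element is passed and returned as a pointer:

package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

func main() {
	sliceType := reflect2.ConfigSafe.Type2(reflect.TypeOf([]int{})).(reflect2.SliceType)

	obj := sliceType.MakeSlice(2, 4) // *[]int with len 2, cap 4
	one := 1
	sliceType.SetIndex(obj, 0, &one) // SetIndex dereferences the element pointer
	sliceType.Append(obj, &one)

	elem := sliceType.GetIndex(obj, 0).(*int)   // GetIndex returns a pointer to a copy
	fmt.Println(*elem, sliceType.LengthOf(obj)) // 1 3
}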
							
								
								
									
vendor/github.com/modern-go/reflect2/safe_struct.go (29 lines; generated, vendored)
							| @ -1,29 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| type safeStructType struct { | ||||
| 	safeType | ||||
| } | ||||
|  | ||||
| func (type2 *safeStructType) FieldByName(name string) StructField { | ||||
| 	field, found := type2.Type.FieldByName(name) | ||||
| 	if !found { | ||||
| 		panic("field " + name + " not found") | ||||
| 	} | ||||
| 	return &safeField{StructField: field} | ||||
| } | ||||
|  | ||||
| func (type2 *safeStructType) Field(i int) StructField { | ||||
| 	return &safeField{StructField: type2.Type.Field(i)} | ||||
| } | ||||
|  | ||||
| func (type2 *safeStructType) FieldByIndex(index []int) StructField { | ||||
| 	return &safeField{StructField: type2.Type.FieldByIndex(index)} | ||||
| } | ||||
|  | ||||
| func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField { | ||||
| 	field, found := type2.Type.FieldByNameFunc(match) | ||||
| 	if !found { | ||||
| 		panic("field match condition not found in " + type2.Type.String()) | ||||
| 	} | ||||
| 	return &safeField{StructField: field} | ||||
| } | ||||
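
The safeField values returned here are defined in a file that is not part of this diff, so their exact accessor semantics are assumed; the sketch below follows the same pointer-based convention as safe_slice.go above:

package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

type user struct {
	Name string
}

func main() {
	structType := reflect2.ConfigSafe.Type2(reflect.TypeOf(user{})).(reflect2.StructType)
	field := structType.FieldByName("Name") // panics if the field does not exist (see above)

	u := &user{}
	name := "gopher"
	field.Set(u, &name) // assumed to copy the *string into the field, as in SetIndex above
	fmt.Println(u.Name) // gopher
}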
							
								
								
									
vendor/github.com/modern-go/reflect2/safe_type.go (78 lines; generated, vendored)
							| @ -1,78 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type safeType struct { | ||||
| 	reflect.Type | ||||
| 	cfg *frozenConfig | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) New() interface{} { | ||||
| 	return reflect.New(type2.Type).Interface() | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) UnsafeNew() unsafe.Pointer { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) Elem() Type { | ||||
| 	return type2.cfg.Type2(type2.Type.Elem()) | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) Type1() reflect.Type { | ||||
| 	return type2.Type | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) Implements(thatType Type) bool { | ||||
| 	return type2.Type.Implements(thatType.Type1()) | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) RType() uintptr { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) Indirect(obj interface{}) interface{} { | ||||
| 	return reflect.Indirect(reflect.ValueOf(obj)).Interface() | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) LikePtr() bool { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) IsNullable() bool { | ||||
| 	return IsNullable(type2.Kind()) | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) IsNil(obj interface{}) bool { | ||||
| 	if obj == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	return reflect.ValueOf(obj).Elem().IsNil() | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) Set(obj interface{}, val interface{}) { | ||||
| 	reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem()) | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { | ||||
| 	panic("does not support unsafe operation") | ||||
| } | ||||
|  | ||||
| func (type2 *safeType) AssignableTo(anotherType Type) bool { | ||||
| 	return type2.Type1().AssignableTo(anotherType.Type1()) | ||||
| } | ||||
							
								
								
									
12  vendor/github.com/modern-go/reflect2/test.sh  generated  vendored
| @ -1,12 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| set -e | ||||
| echo "" > coverage.txt | ||||
|  | ||||
| for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do | ||||
|     go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d | ||||
|     if [ -f profile.out ]; then | ||||
|         cat profile.out >> coverage.txt | ||||
|         rm profile.out | ||||
|     fi | ||||
| done | ||||
							
								
								
									
113  vendor/github.com/modern-go/reflect2/type_map.go  generated  vendored
| @ -1,113 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"runtime" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| // typelinks1 for 1.5 ~ 1.6 | ||||
| //go:linkname typelinks1 reflect.typelinks | ||||
| func typelinks1() [][]unsafe.Pointer | ||||
|  | ||||
| // typelinks2 for 1.7 ~ | ||||
| //go:linkname typelinks2 reflect.typelinks | ||||
| func typelinks2() (sections []unsafe.Pointer, offset [][]int32) | ||||
|  | ||||
| // initOnce guards initialization of types and packages | ||||
| var initOnce sync.Once | ||||
|  | ||||
| var types map[string]reflect.Type | ||||
| var packages map[string]map[string]reflect.Type | ||||
|  | ||||
| // discoverTypes initializes types and packages | ||||
| func discoverTypes() { | ||||
| 	types = make(map[string]reflect.Type) | ||||
| 	packages = make(map[string]map[string]reflect.Type) | ||||
|  | ||||
| 	ver := runtime.Version() | ||||
| 	if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { | ||||
| 		loadGo15Types() | ||||
| 	} else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { | ||||
| 		loadGo15Types() | ||||
| 	} else { | ||||
| 		loadGo17Types() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func loadGo15Types() { | ||||
| 	var obj interface{} = reflect.TypeOf(0) | ||||
| 	typePtrss := typelinks1() | ||||
| 	for _, typePtrs := range typePtrss { | ||||
| 		for _, typePtr := range typePtrs { | ||||
| 			(*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr | ||||
| 			typ := obj.(reflect.Type) | ||||
| 			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { | ||||
| 				loadedType := typ.Elem() | ||||
| 				pkgTypes := packages[loadedType.PkgPath()] | ||||
| 				if pkgTypes == nil { | ||||
| 					pkgTypes = map[string]reflect.Type{} | ||||
| 					packages[loadedType.PkgPath()] = pkgTypes | ||||
| 				} | ||||
| 				types[loadedType.String()] = loadedType | ||||
| 				pkgTypes[loadedType.Name()] = loadedType | ||||
| 			} | ||||
| 			if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && | ||||
| 				typ.Elem().Elem().Kind() == reflect.Struct { | ||||
| 				loadedType := typ.Elem().Elem() | ||||
| 				pkgTypes := packages[loadedType.PkgPath()] | ||||
| 				if pkgTypes == nil { | ||||
| 					pkgTypes = map[string]reflect.Type{} | ||||
| 					packages[loadedType.PkgPath()] = pkgTypes | ||||
| 				} | ||||
| 				types[loadedType.String()] = loadedType | ||||
| 				pkgTypes[loadedType.Name()] = loadedType | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func loadGo17Types() { | ||||
| 	var obj interface{} = reflect.TypeOf(0) | ||||
| 	sections, offset := typelinks2() | ||||
| 	for i, offs := range offset { | ||||
| 		rodata := sections[i] | ||||
| 		for _, off := range offs { | ||||
| 			(*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off) | ||||
| 			typ := obj.(reflect.Type) | ||||
| 			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { | ||||
| 				loadedType := typ.Elem() | ||||
| 				pkgTypes := packages[loadedType.PkgPath()] | ||||
| 				if pkgTypes == nil { | ||||
| 					pkgTypes = map[string]reflect.Type{} | ||||
| 					packages[loadedType.PkgPath()] = pkgTypes | ||||
| 				} | ||||
| 				types[loadedType.String()] = loadedType | ||||
| 				pkgTypes[loadedType.Name()] = loadedType | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type emptyInterface struct { | ||||
| 	typ  unsafe.Pointer | ||||
| 	word unsafe.Pointer | ||||
| } | ||||
|  | ||||
| // TypeByName returns the type by its name, just like Class.forName in Java | ||||
| func TypeByName(typeName string) Type { | ||||
| 	initOnce.Do(discoverTypes) | ||||
| 	return Type2(types[typeName]) | ||||
| } | ||||
|  | ||||
| // TypeByPackageName returns the type by its package and name | ||||
| func TypeByPackageName(pkgPath string, name string) Type { | ||||
| 	initOnce.Do(discoverTypes) | ||||
| 	pkgTypes := packages[pkgPath] | ||||
| 	if pkgTypes == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return Type2(pkgTypes[name]) | ||||
| } | ||||
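For reference, a minimal sketch of how the TypeByName lookup removed above is typically used, similar to Class.forName in Java. This is hedged: it assumes github.com/modern-go/reflect2 is still resolvable on the module path, and the type name main.User is purely illustrative; a type is only discoverable if a pointer to it is compiled into the binary, since discovery walks the runtime typelinks tables shown in discoverTypes.

package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

type User struct {
	Name string
}

// Referencing *User helps ensure the pointer type lands in the binary's
// typelinks, which is what discoverTypes walks to build the name -> type map.
var _ = &User{}

func main() {
	t := reflect2.TypeByName("main.User")
	if t == nil {
		fmt.Println("type not registered")
		return
	}
	fmt.Println(t.Type1().String()) // main.User
}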
							
								
								
									
65  vendor/github.com/modern-go/reflect2/unsafe_array.go  generated  vendored
| @ -1,65 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type UnsafeArrayType struct { | ||||
| 	unsafeType | ||||
| 	elemRType  unsafe.Pointer | ||||
| 	pElemRType unsafe.Pointer | ||||
| 	elemSize   uintptr | ||||
| 	likePtr    bool | ||||
| } | ||||
|  | ||||
| func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType { | ||||
| 	return &UnsafeArrayType{ | ||||
| 		unsafeType: *newUnsafeType(cfg, type1), | ||||
| 		elemRType:  unpackEFace(type1.Elem()).data, | ||||
| 		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data, | ||||
| 		elemSize:   type1.Elem().Size(), | ||||
| 		likePtr:    likePtrType(type1), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeArrayType) LikePtr() bool { | ||||
| 	return type2.likePtr | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIndirect(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { | ||||
| 	if type2.likePtr { | ||||
| 		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) | ||||
| 	} | ||||
| 	return packEFace(type2.rtype, ptr) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	elemEFace := unpackEFace(elem) | ||||
| 	assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) | ||||
| 	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) { | ||||
| 	elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len") | ||||
| 	typedmemmove(type2.elemRType, elemPtr, elem) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	elemPtr := type2.UnsafeGetIndex(objEFace.data, index) | ||||
| 	return packEFace(type2.pElemRType, elemPtr) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { | ||||
| 	return arrayAt(obj, index, type2.elemSize, "i < s.Len") | ||||
| } | ||||
							
								
								
									
59  vendor/github.com/modern-go/reflect2/unsafe_eface.go  generated  vendored
| @ -1,59 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type eface struct { | ||||
| 	rtype unsafe.Pointer | ||||
| 	data  unsafe.Pointer | ||||
| } | ||||
|  | ||||
| func unpackEFace(obj interface{}) *eface { | ||||
| 	return (*eface)(unsafe.Pointer(&obj)) | ||||
| } | ||||
|  | ||||
| func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} { | ||||
| 	var i interface{} | ||||
| 	e := (*eface)(unsafe.Pointer(&i)) | ||||
| 	e.rtype = rtype | ||||
| 	e.data = data | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| type UnsafeEFaceType struct { | ||||
| 	unsafeType | ||||
| } | ||||
|  | ||||
| func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType { | ||||
| 	return &UnsafeEFaceType{ | ||||
| 		unsafeType: *newUnsafeType(cfg, type1), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool { | ||||
| 	if obj == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIsNil(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool { | ||||
| 	if ptr == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	return unpackEFace(*(*interface{})(ptr)).data == nil | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIndirect(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { | ||||
| 	return *(*interface{})(ptr) | ||||
| } | ||||
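The packEFace/unpackEFace pair above relies on the two-word runtime layout of interface{}: a type-descriptor pointer followed by a data pointer. A self-contained sketch of that layout assumption, independent of the removed package:

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the runtime representation of an empty interface:
// one word for the type descriptor, one word for the data.
type eface struct {
	rtype unsafe.Pointer
	data  unsafe.Pointer
}

func main() {
	x := 42
	var i interface{} = &x
	// Reinterpret the interface value as its two raw words.
	e := (*eface)(unsafe.Pointer(&i))
	// The data word is the *int stored in the interface.
	fmt.Println(*(*int)(e.data)) // 42
}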
							
								
								
									
74  vendor/github.com/modern-go/reflect2/unsafe_field.go  generated  vendored
| @ -1,74 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type UnsafeStructField struct { | ||||
| 	reflect.StructField | ||||
| 	structType *UnsafeStructType | ||||
| 	rtype      unsafe.Pointer | ||||
| 	ptrRType   unsafe.Pointer | ||||
| } | ||||
|  | ||||
| func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField { | ||||
| 	return &UnsafeStructField{ | ||||
| 		StructField: structField, | ||||
| 		rtype:       unpackEFace(structField.Type).data, | ||||
| 		ptrRType:    unpackEFace(reflect.PtrTo(structField.Type)).data, | ||||
| 		structType:  structType, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Offset() uintptr { | ||||
| 	return field.StructField.Offset | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Name() string { | ||||
| 	return field.StructField.Name | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) PkgPath() string { | ||||
| 	return field.StructField.PkgPath | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Type() Type { | ||||
| 	return field.structType.cfg.Type2(field.StructField.Type) | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Tag() reflect.StructTag { | ||||
| 	return field.StructField.Tag | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Index() []int { | ||||
| 	return field.StructField.Index | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Anonymous() bool { | ||||
| 	return field.StructField.Anonymous | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Set(obj interface{}, value interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype) | ||||
| 	valueEFace := unpackEFace(value) | ||||
| 	assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype) | ||||
| 	field.UnsafeSet(objEFace.data, valueEFace.data) | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) { | ||||
| 	fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field") | ||||
| 	typedmemmove(field.rtype, fieldPtr, value) | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) Get(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype) | ||||
| 	value := field.UnsafeGet(objEFace.data) | ||||
| 	return packEFace(field.ptrRType, value) | ||||
| } | ||||
|  | ||||
| func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer { | ||||
| 	return add(obj, field.StructField.Offset, "same as non-reflect &v.field") | ||||
| } | ||||
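Together with unsafe_struct.go further down, this file backs a StructField.Get/Set API that works on raw pointers instead of reflect.Value. A hedged usage sketch; the reflect2.StructType interface name is assumed (only the concrete types appear in these removed files), and User is an illustrative type:

package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

type User struct {
	Name string
}

func main() {
	structType := reflect2.Type2(reflect.TypeOf(User{})).(reflect2.StructType)
	nameField := structType.FieldByName("Name")

	u := User{}
	newName := "alice"
	// Set expects a pointer to the struct and a pointer to the new value.
	nameField.Set(&u, &newName)
	// Get returns a pointer to the field inside the struct.
	got := nameField.Get(&u).(*string)
	fmt.Println(u.Name, *got) // alice alice
}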
							
								
								
									
64  vendor/github.com/modern-go/reflect2/unsafe_iface.go  generated  vendored
| @ -1,64 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type iface struct { | ||||
| 	itab *itab | ||||
| 	data unsafe.Pointer | ||||
| } | ||||
|  | ||||
| type itab struct { | ||||
| 	ignore unsafe.Pointer | ||||
| 	rtype  unsafe.Pointer | ||||
| } | ||||
|  | ||||
| func IFaceToEFace(ptr unsafe.Pointer) interface{} { | ||||
| 	iface := (*iface)(ptr) | ||||
| 	if iface.itab == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return packEFace(iface.itab.rtype, iface.data) | ||||
| } | ||||
|  | ||||
| type UnsafeIFaceType struct { | ||||
| 	unsafeType | ||||
| } | ||||
|  | ||||
| func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType { | ||||
| 	return &UnsafeIFaceType{ | ||||
| 		unsafeType: *newUnsafeType(cfg, type1), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIndirect(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { | ||||
| 	return IFaceToEFace(ptr) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool { | ||||
| 	if obj == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIsNil(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool { | ||||
| 	if ptr == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	iface := (*iface)(ptr) | ||||
| 	if iface.itab == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
							
								
								
									
70  vendor/github.com/modern-go/reflect2/unsafe_link.go  generated  vendored
| @ -1,70 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import "unsafe" | ||||
|  | ||||
| //go:linkname unsafe_New reflect.unsafe_New | ||||
| func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer | ||||
|  | ||||
| //go:linkname typedmemmove reflect.typedmemmove | ||||
| func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer) | ||||
|  | ||||
| //go:linkname unsafe_NewArray reflect.unsafe_NewArray | ||||
| func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer | ||||
|  | ||||
| // typedslicecopy copies a slice of elemType values from src to dst, | ||||
| // returning the number of elements copied. | ||||
| //go:linkname typedslicecopy reflect.typedslicecopy | ||||
| //go:noescape | ||||
| func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int | ||||
|  | ||||
| //go:linkname mapassign reflect.mapassign | ||||
| //go:noescape | ||||
| func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) | ||||
|  | ||||
| //go:linkname mapaccess reflect.mapaccess | ||||
| //go:noescape | ||||
| func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) | ||||
|  | ||||
| // m escapes into the return value, but the caller of mapiterinit | ||||
| // doesn't let the return value escape. | ||||
| //go:noescape | ||||
| //go:linkname mapiterinit reflect.mapiterinit | ||||
| func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter | ||||
|  | ||||
| //go:noescape | ||||
| //go:linkname mapiternext reflect.mapiternext | ||||
| func mapiternext(it *hiter) | ||||
|  | ||||
| //go:linkname ifaceE2I reflect.ifaceE2I | ||||
| func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) | ||||
|  | ||||
| // A hash iteration structure. | ||||
| // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate | ||||
| // the layout of this structure. | ||||
| type hiter struct { | ||||
| 	key   unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go). | ||||
| 	value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). | ||||
| 	// rest fields are ignored | ||||
| } | ||||
|  | ||||
| // add returns p+x. | ||||
| // | ||||
| // The whySafe string is ignored, so that the function still inlines | ||||
| // as efficiently as p+x, but all call sites should use the string to | ||||
| // record why the addition is safe, which is to say why the addition | ||||
| // does not cause x to advance to the very end of p's allocation | ||||
| // and therefore point incorrectly at the next block in memory. | ||||
| func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { | ||||
| 	return unsafe.Pointer(uintptr(p) + x) | ||||
| } | ||||
|  | ||||
| // arrayAt returns the i-th element of p, | ||||
| // an array whose elements are eltSize bytes wide. | ||||
| // The array pointed at by p must have at least i+1 elements: | ||||
| // it is invalid (but impossible to check here) to pass i >= len, | ||||
| // because then the result will point outside the array. | ||||
| // whySafe must explain why i < len. (Passing "i < len" is fine; | ||||
| // the benefit is to surface this assumption at the call site.) | ||||
| func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer { | ||||
| 	return add(p, uintptr(i)*eltSize, "i < len") | ||||
| } | ||||
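add and arrayAt reduce to plain pointer arithmetic; the whySafe string only documents why the offset stays in bounds. The same arithmetic applied to an ordinary array, with no go:linkname machinery involved:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	arr := [4]int64{10, 20, 30, 40}
	base := unsafe.Pointer(&arr)
	eltSize := unsafe.Sizeof(arr[0])

	// Equivalent of arrayAt(base, 2, eltSize, "i < len"): advance two
	// element widths from the base of the array and read the value there.
	third := *(*int64)(unsafe.Pointer(uintptr(base) + 2*eltSize))
	fmt.Println(third) // 30
}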
							
								
								
									
138  vendor/github.com/modern-go/reflect2/unsafe_map.go  generated  vendored
| @ -1,138 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type UnsafeMapType struct { | ||||
| 	unsafeType | ||||
| 	pKeyRType  unsafe.Pointer | ||||
| 	pElemRType unsafe.Pointer | ||||
| } | ||||
|  | ||||
| func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType { | ||||
| 	return &UnsafeMapType{ | ||||
| 		unsafeType: *newUnsafeType(cfg, type1), | ||||
| 		pKeyRType:  unpackEFace(reflect.PtrTo(type1.Key())).data, | ||||
| 		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) IsNil(obj interface{}) bool { | ||||
| 	if obj == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIsNil(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool { | ||||
| 	if ptr == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	return *(*unsafe.Pointer)(ptr) == nil | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) LikePtr() bool { | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIndirect(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { | ||||
| 	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) Key() Type { | ||||
| 	return type2.cfg.Type2(type2.Type.Key()) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) MakeMap(cap int) interface{} { | ||||
| 	return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap)) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer { | ||||
| 	m := makeMapWithSize(type2.rtype, cap) | ||||
| 	return unsafe.Pointer(&m) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	keyEFace := unpackEFace(key) | ||||
| 	assertType("MapType.SetIndex argument 2", type2.pKeyRType, keyEFace.rtype) | ||||
| 	elemEFace := unpackEFace(elem) | ||||
| 	assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) | ||||
| 	type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) { | ||||
| 	mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	keyEFace := unpackEFace(key) | ||||
| 	assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype) | ||||
| 	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data) | ||||
| 	if elemPtr == nil { | ||||
| 		return nil, false | ||||
| 	} | ||||
| 	return packEFace(type2.pElemRType, elemPtr), true | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	keyEFace := unpackEFace(key) | ||||
| 	assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype) | ||||
| 	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data) | ||||
| 	return packEFace(type2.pElemRType, elemPtr) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer { | ||||
| 	return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIterate(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { | ||||
| 	return &UnsafeMapIterator{ | ||||
| 		hiter:      mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), | ||||
| 		pKeyRType:  type2.pKeyRType, | ||||
| 		pElemRType: type2.pElemRType, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type UnsafeMapIterator struct { | ||||
| 	*hiter | ||||
| 	pKeyRType  unsafe.Pointer | ||||
| 	pElemRType unsafe.Pointer | ||||
| } | ||||
|  | ||||
| func (iter *UnsafeMapIterator) HasNext() bool { | ||||
| 	return iter.key != nil | ||||
| } | ||||
|  | ||||
| func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) { | ||||
| 	key, elem := iter.UnsafeNext() | ||||
| 	return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem) | ||||
| } | ||||
|  | ||||
| func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) { | ||||
| 	key := iter.key | ||||
| 	elem := iter.value | ||||
| 	mapiternext(iter.hiter) | ||||
| 	return key, elem | ||||
| } | ||||
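The iterator above walks the map through the linknamed mapiterinit/mapiternext instead of reflect.Value.MapRange. A hedged sketch of the MapType surface it implements, as shown in this removed file: the obj argument is a pointer to the map, and keys and values come back as pointers.

package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

func main() {
	mapType := reflect2.Type2(reflect.TypeOf(map[string]int{})).(reflect2.MapType)
	m := map[string]int{"a": 1, "b": 2}

	iter := mapType.Iterate(&m) // argument 1 must be *map[string]int
	for iter.HasNext() {
		k, v := iter.Next() // packed as *string and *int
		fmt.Println(*k.(*string), *v.(*int))
	}
}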
							
								
								
									
46  vendor/github.com/modern-go/reflect2/unsafe_ptr.go  generated  vendored
| @ -1,46 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type UnsafePtrType struct { | ||||
| 	unsafeType | ||||
| } | ||||
|  | ||||
| func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType { | ||||
| 	return &UnsafePtrType{ | ||||
| 		unsafeType: *newUnsafeType(cfg, type1), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafePtrType) IsNil(obj interface{}) bool { | ||||
| 	if obj == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIsNil(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool { | ||||
| 	if ptr == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	return *(*unsafe.Pointer)(ptr) == nil | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafePtrType) LikePtr() bool { | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIndirect(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { | ||||
| 	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) | ||||
| } | ||||
							
								
								
									
177  vendor/github.com/modern-go/reflect2/unsafe_slice.go  generated  vendored
| @ -1,177 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| // sliceHeader is a safe version of SliceHeader used within this package. | ||||
| type sliceHeader struct { | ||||
| 	Data unsafe.Pointer | ||||
| 	Len  int | ||||
| 	Cap  int | ||||
| } | ||||
|  | ||||
| type UnsafeSliceType struct { | ||||
| 	unsafeType | ||||
| 	elemRType  unsafe.Pointer | ||||
| 	pElemRType unsafe.Pointer | ||||
| 	elemSize   uintptr | ||||
| } | ||||
|  | ||||
| func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType { | ||||
| 	elemType := type1.Elem() | ||||
| 	return &UnsafeSliceType{ | ||||
| 		unsafeType: *newUnsafeType(cfg, type1), | ||||
| 		pElemRType: unpackEFace(reflect.PtrTo(elemType)).data, | ||||
| 		elemRType:  unpackEFace(elemType).data, | ||||
| 		elemSize:   elemType.Size(), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	valEFace := unpackEFace(val) | ||||
| 	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype) | ||||
| 	type2.UnsafeSet(objEFace.data, valEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { | ||||
| 	*(*sliceHeader)(ptr) = *(*sliceHeader)(val) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool { | ||||
| 	if obj == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIsNil(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool { | ||||
| 	if ptr == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	return (*sliceHeader)(ptr).Data == nil | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) SetNil(obj interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	type2.UnsafeSetNil(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) { | ||||
| 	header := (*sliceHeader)(ptr) | ||||
| 	header.Len = 0 | ||||
| 	header.Cap = 0 | ||||
| 	header.Data = nil | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} { | ||||
| 	return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap)) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer { | ||||
| 	header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap} | ||||
| 	return unsafe.Pointer(header) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeLengthOf(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int { | ||||
| 	header := (*sliceHeader)(obj) | ||||
| 	return header.Len | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	elemEFace := unpackEFace(elem) | ||||
| 	assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) | ||||
| 	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) { | ||||
| 	header := (*sliceHeader)(obj) | ||||
| 	elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len") | ||||
| 	typedmemmove(type2.elemRType, elemPtr, elem) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	elemPtr := type2.UnsafeGetIndex(objEFace.data, index) | ||||
| 	return packEFace(type2.pElemRType, elemPtr) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { | ||||
| 	header := (*sliceHeader)(obj) | ||||
| 	return arrayAt(header.Data, index, type2.elemSize, "i < s.Len") | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	elemEFace := unpackEFace(elem) | ||||
| 	assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype) | ||||
| 	type2.UnsafeAppend(objEFace.data, elemEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) { | ||||
| 	header := (*sliceHeader)(obj) | ||||
| 	oldLen := header.Len | ||||
| 	type2.UnsafeGrow(obj, oldLen+1) | ||||
| 	type2.UnsafeSetIndex(obj, oldLen, elem) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) Cap(obj interface{}) int { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeCap(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int { | ||||
| 	return (*sliceHeader)(ptr).Cap | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	type2.UnsafeGrow(objEFace.data, newLength) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) { | ||||
| 	header := (*sliceHeader)(obj) | ||||
| 	if newLength <= header.Cap { | ||||
| 		header.Len = newLength | ||||
| 		return | ||||
| 	} | ||||
| 	newCap := calcNewCap(header.Cap, newLength) | ||||
| 	newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap)) | ||||
| 	typedslicecopy(type2.elemRType, *newHeader, *header) | ||||
| 	header.Data = newHeader.Data | ||||
| 	header.Cap = newHeader.Cap | ||||
| 	header.Len = newLength | ||||
| } | ||||
|  | ||||
| func calcNewCap(cap int, expectedCap int) int { | ||||
| 	if cap == 0 { | ||||
| 		cap = expectedCap | ||||
| 	} else { | ||||
| 		for cap < expectedCap { | ||||
| 			if cap < 1024 { | ||||
| 				cap += cap | ||||
| 			} else { | ||||
| 				cap += cap / 4 | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return cap | ||||
| } | ||||
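UnsafeGrow mirrors append's growth rule, which calcNewCap encodes: double the capacity until 1024 elements, then grow by a quarter. A hedged usage sketch of the SliceType API built on it (obj is a pointer to the slice and elements are passed by pointer, as the assertions above require; LengthOf is assumed to be part of the exported SliceType interface):

package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

func main() {
	sliceType := reflect2.Type2(reflect.TypeOf([]int{})).(reflect2.SliceType)

	list := sliceType.MakeSlice(0, 2).(*[]int) // length 0, capacity 2
	for _, v := range []int{7, 8, 9} {
		v := v                     // take the address of a fresh copy, not the loop variable
		sliceType.Append(list, &v) // grows past the initial capacity via UnsafeGrow
	}
	fmt.Println(*list, sliceType.LengthOf(list)) // [7 8 9] 3
}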
							
								
								
									
59  vendor/github.com/modern-go/reflect2/unsafe_struct.go  generated  vendored
| @ -1,59 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type UnsafeStructType struct { | ||||
| 	unsafeType | ||||
| 	likePtr bool | ||||
| } | ||||
|  | ||||
| func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType { | ||||
| 	return &UnsafeStructType{ | ||||
| 		unsafeType: *newUnsafeType(cfg, type1), | ||||
| 		likePtr:    likePtrType(type1), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeStructType) LikePtr() bool { | ||||
| 	return type2.likePtr | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIndirect(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { | ||||
| 	if type2.likePtr { | ||||
| 		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) | ||||
| 	} | ||||
| 	return packEFace(type2.rtype, ptr) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeStructType) FieldByName(name string) StructField { | ||||
| 	structField, found := type2.Type.FieldByName(name) | ||||
| 	if !found { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return newUnsafeStructField(type2, structField) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeStructType) Field(i int) StructField { | ||||
| 	return newUnsafeStructField(type2, type2.Type.Field(i)) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField { | ||||
| 	return newUnsafeStructField(type2, type2.Type.FieldByIndex(index)) | ||||
| } | ||||
|  | ||||
| func (type2 *UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField { | ||||
| 	structField, found := type2.Type.FieldByNameFunc(match) | ||||
| 	if !found { | ||||
| 		panic("field match condition not found in " + type2.Type.String()) | ||||
| 	} | ||||
| 	return newUnsafeStructField(type2, structField) | ||||
| } | ||||
							
								
								
									
85  vendor/github.com/modern-go/reflect2/unsafe_type.go  generated  vendored
| @ -1,85 +0,0 @@ | ||||
| package reflect2 | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
| type unsafeType struct { | ||||
| 	safeType | ||||
| 	rtype    unsafe.Pointer | ||||
| 	ptrRType unsafe.Pointer | ||||
| } | ||||
|  | ||||
| func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType { | ||||
| 	return &unsafeType{ | ||||
| 		safeType: safeType{ | ||||
| 			Type: type1, | ||||
| 			cfg:  cfg, | ||||
| 		}, | ||||
| 		rtype:    unpackEFace(type1).data, | ||||
| 		ptrRType: unpackEFace(reflect.PtrTo(type1)).data, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) Set(obj interface{}, val interface{}) { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	valEFace := unpackEFace(val) | ||||
| 	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype) | ||||
| 	type2.UnsafeSet(objEFace.data, valEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { | ||||
| 	typedmemmove(type2.rtype, ptr, val) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) IsNil(obj interface{}) bool { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIsNil(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool { | ||||
| 	return ptr == nil | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) UnsafeNew() unsafe.Pointer { | ||||
| 	return unsafe_New(type2.rtype) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) New() interface{} { | ||||
| 	return packEFace(type2.ptrRType, type2.UnsafeNew()) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} { | ||||
| 	return packEFace(type2.ptrRType, ptr) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) RType() uintptr { | ||||
| 	return uintptr(type2.rtype) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) Indirect(obj interface{}) interface{} { | ||||
| 	objEFace := unpackEFace(obj) | ||||
| 	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) | ||||
| 	return type2.UnsafeIndirect(objEFace.data) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} { | ||||
| 	return packEFace(type2.rtype, obj) | ||||
| } | ||||
|  | ||||
| func (type2 *unsafeType) LikePtr() bool { | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) { | ||||
| 	if expectRType != actualRType { | ||||
| 		expectType := reflect.TypeOf(0) | ||||
| 		(*iface)(unsafe.Pointer(&expectType)).data = expectRType | ||||
| 		actualType := reflect.TypeOf(0) | ||||
| 		(*iface)(unsafe.Pointer(&actualType)).data = actualRType | ||||
| 		panic(where + ": expect " + expectType.String() + ", actual " + actualType.String()) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
201  vendor/github.com/prometheus/client_golang/LICENSE  generated  vendored
| @ -1,201 +0,0 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
23  vendor/github.com/prometheus/client_golang/NOTICE  generated  vendored
| @ -1,23 +0,0 @@ | ||||
| Prometheus instrumentation library for Go applications | ||||
| Copyright 2012-2015 The Prometheus Authors | ||||
|  | ||||
| This product includes software developed at | ||||
| SoundCloud Ltd. (http://soundcloud.com/). | ||||
|  | ||||
|  | ||||
| The following components are included in this product: | ||||
|  | ||||
| perks - a fork of https://github.com/bmizerany/perks | ||||
| https://github.com/beorn7/perks | ||||
| Copyright 2013-2015 Blake Mizerany, Björn Rabenstein | ||||
| See https://github.com/beorn7/perks/blob/master/README.md for license details. | ||||
|  | ||||
| Go support for Protocol Buffers - Google's data interchange format | ||||
| http://github.com/golang/protobuf/ | ||||
| Copyright 2010 The Go Authors | ||||
| See source code for license details. | ||||
|  | ||||
| Support for streaming Protocol Buffer messages for the Go language (golang). | ||||
| https://github.com/matttproud/golang_protobuf_extensions | ||||
| Copyright 2013 Matt T. Proud | ||||
| Licensed under the Apache License, Version 2.0 | ||||
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/.gitignore  (1 line, generated, vendored)
| @ -1 +0,0 @@ | ||||
| command-line-arguments.test | ||||
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/README.md  (1 line, generated, vendored)
| @ -1 +0,0 @@ | ||||
| See https://godoc.org/github.com/prometheus/client_golang/prometheus. | ||||
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/build_info.go  (29 lines, generated, vendored)
| @ -1,29 +0,0 @@ | ||||
| // Copyright 2019 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // +build go1.12 | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import "runtime/debug" | ||||
|  | ||||
| // readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. | ||||
| func readBuildInfo() (path, version, sum string) { | ||||
| 	path, version, sum = "unknown", "unknown", "unknown" | ||||
| 	if bi, ok := debug.ReadBuildInfo(); ok { | ||||
| 		path = bi.Main.Path | ||||
| 		version = bi.Main.Version | ||||
| 		sum = bi.Main.Sum | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
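
For context on the readBuildInfo wrapper deleted above: it only guards a call to debug.ReadBuildInfo, which returns data solely for binaries built in module mode. A minimal standalone sketch of that underlying call (output formatting is illustrative):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// ReadBuildInfo reports ok == false for binaries not built in module mode.
	if bi, ok := debug.ReadBuildInfo(); ok {
		fmt.Println(bi.Main.Path, bi.Main.Version, bi.Main.Sum)
	} else {
		fmt.Println("build info unavailable")
	}
}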
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go  (22 lines, generated, vendored)
| @ -1,22 +0,0 @@ | ||||
| // Copyright 2019 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // +build !go1.12 | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| // readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before | ||||
| // 1.12. Remove this whole file once the minimum supported Go version is 1.12. | ||||
| func readBuildInfo() (path, version, sum string) { | ||||
| 	return "unknown", "unknown", "unknown" | ||||
| } | ||||
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/collector.go  (120 lines, generated, vendored)
| @ -1,120 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| // Collector is the interface implemented by anything that can be used by | ||||
| // Prometheus to collect metrics. A Collector has to be registered for | ||||
| // collection. See Registerer.Register. | ||||
| // | ||||
| // The stock metrics provided by this package (Gauge, Counter, Summary, | ||||
| // Histogram, Untyped) are also Collectors (which only ever collect one metric, | ||||
| // namely itself). An implementer of Collector may, however, collect multiple | ||||
| // metrics in a coordinated fashion and/or create metrics on the fly. Examples | ||||
| // for collectors already implemented in this library are the metric vectors | ||||
| // (i.e. collection of multiple instances of the same Metric but with different | ||||
| // label values) like GaugeVec or SummaryVec, and the ExpvarCollector. | ||||
| type Collector interface { | ||||
| 	// Describe sends the super-set of all possible descriptors of metrics | ||||
| 	// collected by this Collector to the provided channel and returns once | ||||
| 	// the last descriptor has been sent. The sent descriptors fulfill the | ||||
| 	// consistency and uniqueness requirements described in the Desc | ||||
| 	// documentation. | ||||
| 	// | ||||
| 	// It is valid if one and the same Collector sends duplicate | ||||
| 	// descriptors. Those duplicates are simply ignored. However, two | ||||
| 	// different Collectors must not send duplicate descriptors. | ||||
| 	// | ||||
| 	// Sending no descriptor at all marks the Collector as “unchecked”, | ||||
| 	// i.e. no checks will be performed at registration time, and the | ||||
| 	// Collector may yield any Metric it sees fit in its Collect method. | ||||
| 	// | ||||
| 	// This method idempotently sends the same descriptors throughout the | ||||
| 	// lifetime of the Collector. It may be called concurrently and | ||||
| 	// therefore must be implemented in a concurrency safe way. | ||||
| 	// | ||||
| 	// If a Collector encounters an error while executing this method, it | ||||
| 	// must send an invalid descriptor (created with NewInvalidDesc) to | ||||
| 	// signal the error to the registry. | ||||
| 	Describe(chan<- *Desc) | ||||
| 	// Collect is called by the Prometheus registry when collecting | ||||
| 	// metrics. The implementation sends each collected metric via the | ||||
| 	// provided channel and returns once the last metric has been sent. The | ||||
| 	// descriptor of each sent metric is one of those returned by Describe | ||||
| 	// (unless the Collector is unchecked, see above). Returned metrics that | ||||
| 	// share the same descriptor must differ in their variable label | ||||
| 	// values. | ||||
| 	// | ||||
| 	// This method may be called concurrently and must therefore be | ||||
| 	// implemented in a concurrency safe way. Blocking occurs at the expense | ||||
| 	// of total performance of rendering all registered metrics. Ideally, | ||||
| 	// Collector implementations support concurrent readers. | ||||
| 	Collect(chan<- Metric) | ||||
| } | ||||
|  | ||||
| // DescribeByCollect is a helper to implement the Describe method of a custom | ||||
| // Collector. It collects the metrics from the provided Collector and sends | ||||
| // their descriptors to the provided channel. | ||||
| // | ||||
| // If a Collector collects the same metrics throughout its lifetime, its | ||||
| // Describe method can simply be implemented as: | ||||
| // | ||||
| //   func (c customCollector) Describe(ch chan<- *Desc) { | ||||
| //   	DescribeByCollect(c, ch) | ||||
| //   } | ||||
| // | ||||
| // However, this will not work if the metrics collected change dynamically over | ||||
| // the lifetime of the Collector in a way that their combined set of descriptors | ||||
| // changes as well. The shortcut implementation will then violate the contract | ||||
| // of the Describe method. If a Collector sometimes collects no metrics at all | ||||
| // (for example vectors like CounterVec, GaugeVec, etc., which only collect | ||||
| // metrics after a metric with a fully specified label set has been accessed), | ||||
| // it might even get registered as an unchecked Collector (cf. the Register | ||||
| // method of the Registerer interface). Hence, only use this shortcut | ||||
| // implementation of Describe if you are certain to fulfill the contract. | ||||
| // | ||||
| // The Collector example demonstrates a use of DescribeByCollect. | ||||
| func DescribeByCollect(c Collector, descs chan<- *Desc) { | ||||
| 	metrics := make(chan Metric) | ||||
| 	go func() { | ||||
| 		c.Collect(metrics) | ||||
| 		close(metrics) | ||||
| 	}() | ||||
| 	for m := range metrics { | ||||
| 		descs <- m.Desc() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // selfCollector implements Collector for a single Metric so that the Metric | ||||
| // collects itself. Add it as an anonymous field to a struct that implements | ||||
| // Metric, and call init with the Metric itself as an argument. | ||||
| type selfCollector struct { | ||||
| 	self Metric | ||||
| } | ||||
|  | ||||
| // init provides the selfCollector with a reference to the metric it is supposed | ||||
| // to collect. It is usually called within the factory function to create a | ||||
| // metric. See example. | ||||
| func (c *selfCollector) init(self Metric) { | ||||
| 	c.self = self | ||||
| } | ||||
|  | ||||
| // Describe implements Collector. | ||||
| func (c *selfCollector) Describe(ch chan<- *Desc) { | ||||
| 	ch <- c.self.Desc() | ||||
| } | ||||
|  | ||||
| // Collect implements Collector. | ||||
| func (c *selfCollector) Collect(ch chan<- Metric) { | ||||
| 	ch <- c.self | ||||
| } | ||||
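
For reference, a minimal sketch of a custom Collector built on the interface and the DescribeByCollect helper shown (deleted) above. It assumes client_golang is pulled in as a normal module dependency rather than from the removed vendor tree; queueCollector, its backlog callback, and the metric name are hypothetical.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector reads a queue length at scrape time and reports it as a
// single const metric.
type queueCollector struct {
	desc    *prometheus.Desc
	backlog func() float64 // hypothetical source of the current value
}

// Describe delegates to DescribeByCollect, which is safe here because the
// collector always emits exactly one metric with a fixed descriptor.
func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect creates the metric on the fly from the value read at scrape time.
func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.backlog())
}

func main() {
	c := queueCollector{
		desc:    prometheus.NewDesc("queue_backlog", "Current queue backlog.", nil, nil),
		backlog: func() float64 { return 42 },
	}
	prometheus.MustRegister(c)
}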
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/counter.go  (321 lines, generated, vendored)
| @ -1,321 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"math" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // Counter is a Metric that represents a single numerical value that only ever | ||||
| // goes up. That implies that it cannot be used to count items whose number can | ||||
| // also go down, e.g. the number of currently running goroutines. Those | ||||
| // "counters" are represented by Gauges. | ||||
| // | ||||
| // A Counter is typically used to count requests served, tasks completed, errors | ||||
| // occurred, etc. | ||||
| // | ||||
| // To create Counter instances, use NewCounter. | ||||
| type Counter interface { | ||||
| 	Metric | ||||
| 	Collector | ||||
|  | ||||
| 	// Inc increments the counter by 1. Use Add to increment it by arbitrary | ||||
| 	// non-negative values. | ||||
| 	Inc() | ||||
| 	// Add adds the given value to the counter. It panics if the value is < | ||||
| 	// 0. | ||||
| 	Add(float64) | ||||
| } | ||||
|  | ||||
| // ExemplarAdder is implemented by Counters that offer the option of adding a | ||||
| // value to the Counter together with an exemplar. Its AddWithExemplar method | ||||
| // works like the Add method of the Counter interface but also replaces the | ||||
| // currently saved exemplar (if any) with a new one, created from the provided | ||||
| // value, the current time as timestamp, and the provided labels. Empty Labels | ||||
| // will lead to a valid (label-less) exemplar. But if Labels is nil, the current | ||||
| // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any | ||||
| // of the provided labels are invalid, or if the provided labels contain more | ||||
| // than 64 runes in total. | ||||
| type ExemplarAdder interface { | ||||
| 	AddWithExemplar(value float64, exemplar Labels) | ||||
| } | ||||
|  | ||||
| // CounterOpts is an alias for Opts. See there for doc comments. | ||||
| type CounterOpts Opts | ||||
|  | ||||
| // NewCounter creates a new Counter based on the provided CounterOpts. | ||||
| // | ||||
| // The returned implementation also implements ExemplarAdder. It is safe to | ||||
| // perform the corresponding type assertion. | ||||
| // | ||||
| // The returned implementation tracks the counter value in two separate | ||||
| // variables, a float64 and a uint64. The latter is used to track calls of the | ||||
| // Inc method and calls of the Add method with a value that can be represented | ||||
| // as a uint64. This allows atomic increments of the counter with optimal | ||||
| // performance. (It is common to have an Inc call in very hot execution paths.) | ||||
| // Both internal tracking values are added up in the Write method. This has to | ||||
| // be taken into account when it comes to precision and overflow behavior. | ||||
| func NewCounter(opts CounterOpts) Counter { | ||||
| 	desc := NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		nil, | ||||
| 		opts.ConstLabels, | ||||
| 	) | ||||
| 	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} | ||||
| 	result.init(result) // Init self-collection. | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| type counter struct { | ||||
| 	// valBits contains the bits of the represented float64 value, while | ||||
| 	// valInt stores values that are exact integers. Both have to go first | ||||
| 	// in the struct to guarantee alignment for atomic operations. | ||||
| 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG | ||||
| 	valBits uint64 | ||||
| 	valInt  uint64 | ||||
|  | ||||
| 	selfCollector | ||||
| 	desc *Desc | ||||
|  | ||||
| 	labelPairs []*dto.LabelPair | ||||
| 	exemplar   atomic.Value // Containing nil or a *dto.Exemplar. | ||||
|  | ||||
| 	now func() time.Time // To mock out time.Now() for testing. | ||||
| } | ||||
|  | ||||
| func (c *counter) Desc() *Desc { | ||||
| 	return c.desc | ||||
| } | ||||
|  | ||||
| func (c *counter) Add(v float64) { | ||||
| 	if v < 0 { | ||||
| 		panic(errors.New("counter cannot decrease in value")) | ||||
| 	} | ||||
|  | ||||
| 	ival := uint64(v) | ||||
| 	if float64(ival) == v { | ||||
| 		atomic.AddUint64(&c.valInt, ival) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	for { | ||||
| 		oldBits := atomic.LoadUint64(&c.valBits) | ||||
| 		newBits := math.Float64bits(math.Float64frombits(oldBits) + v) | ||||
| 		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *counter) AddWithExemplar(v float64, e Labels) { | ||||
| 	c.Add(v) | ||||
| 	c.updateExemplar(v, e) | ||||
| } | ||||
|  | ||||
| func (c *counter) Inc() { | ||||
| 	atomic.AddUint64(&c.valInt, 1) | ||||
| } | ||||
|  | ||||
| func (c *counter) Write(out *dto.Metric) error { | ||||
| 	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) | ||||
| 	ival := atomic.LoadUint64(&c.valInt) | ||||
| 	val := fval + float64(ival) | ||||
|  | ||||
| 	var exemplar *dto.Exemplar | ||||
| 	if e := c.exemplar.Load(); e != nil { | ||||
| 		exemplar = e.(*dto.Exemplar) | ||||
| 	} | ||||
|  | ||||
| 	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) | ||||
| } | ||||
|  | ||||
| func (c *counter) updateExemplar(v float64, l Labels) { | ||||
| 	if l == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	e, err := newExemplar(v, c.now(), l) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	c.exemplar.Store(e) | ||||
| } | ||||
|  | ||||
| // CounterVec is a Collector that bundles a set of Counters that all share the | ||||
| // same Desc, but have different values for their variable labels. This is used | ||||
| // if you want to count the same thing partitioned by various dimensions | ||||
| // (e.g. number of HTTP requests, partitioned by response code and | ||||
| // method). Create instances with NewCounterVec. | ||||
| type CounterVec struct { | ||||
| 	*MetricVec | ||||
| } | ||||
|  | ||||
| // NewCounterVec creates a new CounterVec based on the provided CounterOpts and | ||||
| // partitioned by the given label names. | ||||
| func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { | ||||
| 	desc := NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		labelNames, | ||||
| 		opts.ConstLabels, | ||||
| 	) | ||||
| 	return &CounterVec{ | ||||
| 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { | ||||
| 			if len(lvs) != len(desc.variableLabels) { | ||||
| 				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) | ||||
| 			} | ||||
| 			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} | ||||
| 			result.init(result) // Init self-collection. | ||||
| 			return result | ||||
| 		}), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // GetMetricWithLabelValues returns the Counter for the given slice of label | ||||
| // values (same order as the variable labels in Desc). If that combination of | ||||
| // label values is accessed for the first time, a new Counter is created. | ||||
| // | ||||
| // It is possible to call this method without using the returned Counter to only | ||||
| // create the new Counter but leave it at its starting value 0. See also the | ||||
| // SummaryVec example. | ||||
| // | ||||
| // Keeping the Counter for later use is possible (and should be considered if | ||||
| // performance is critical), but keep in mind that Reset, DeleteLabelValues and | ||||
| // Delete can be used to delete the Counter from the CounterVec. In that case, | ||||
| // the Counter will still exist, but it will not be exported anymore, even if a | ||||
| // Counter with the same label values is created later. | ||||
| // | ||||
| // An error is returned if the number of label values is not the same as the | ||||
| // number of variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // Note that for more than one label value, this method is prone to mistakes | ||||
| // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | ||||
| // an alternative to avoid that type of mistake. For higher label numbers, the | ||||
| // latter has a much more readable (albeit more verbose) syntax, but it comes | ||||
| // with a performance overhead (for creating and processing the Labels map). | ||||
| // See also the GaugeVec example. | ||||
| func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Counter), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // GetMetricWith returns the Counter for the given Labels map (the label names | ||||
| // must match those of the variable labels in Desc). If that label map is | ||||
| // accessed for the first time, a new Counter is created. Implications of | ||||
| // creating a Counter without using it and keeping the Counter for later use are | ||||
| // the same as for GetMetricWithLabelValues. | ||||
| // | ||||
| // An error is returned if the number and names of the Labels are inconsistent | ||||
| // with those of the variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // This method is used for the same purpose as | ||||
| // GetMetricWithLabelValues(...string). See there for pros and cons of the two | ||||
| // methods. | ||||
| func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWith(labels) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Counter), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // WithLabelValues works as GetMetricWithLabelValues, but panics where | ||||
| // GetMetricWithLabelValues would have returned an error. Not returning an | ||||
| // error allows shortcuts like | ||||
| //     myVec.WithLabelValues("404", "GET").Add(42) | ||||
| func (v *CounterVec) WithLabelValues(lvs ...string) Counter { | ||||
| 	c, err := v.GetMetricWithLabelValues(lvs...) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // With works as GetMetricWith, but panics where GetMetricWithLabels would have | ||||
| // returned an error. Not returning an error allows shortcuts like | ||||
| //     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) | ||||
| func (v *CounterVec) With(labels Labels) Counter { | ||||
| 	c, err := v.GetMetricWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // CurryWith returns a vector curried with the provided labels, i.e. the | ||||
| // returned vector has those labels pre-set for all labeled operations performed | ||||
| // on it. The cardinality of the curried vector is reduced accordingly. The | ||||
| // order of the remaining labels stays the same (just with the curried labels | ||||
| // taken out of the sequence – which is relevant for the | ||||
| // (GetMetric)WithLabelValues methods). It is possible to curry a curried | ||||
| // vector, but only with labels not yet used for currying before. | ||||
| // | ||||
| // The metrics contained in the CounterVec are shared between the curried and | ||||
| // uncurried vectors. They are just accessed differently. Curried and uncurried | ||||
| // vectors behave identically in terms of collection. Only one must be | ||||
| // registered with a given registry (usually the uncurried version). The Reset | ||||
| // method deletes all metrics, even if called on a curried vector. | ||||
| func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { | ||||
| 	vec, err := v.MetricVec.CurryWith(labels) | ||||
| 	if vec != nil { | ||||
| 		return &CounterVec{vec}, err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // MustCurryWith works as CurryWith but panics where CurryWith would have | ||||
| // returned an error. | ||||
| func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { | ||||
| 	vec, err := v.CurryWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return vec | ||||
| } | ||||
|  | ||||
| // CounterFunc is a Counter whose value is determined at collect time by calling a | ||||
| // provided function. | ||||
| // | ||||
| // To create CounterFunc instances, use NewCounterFunc. | ||||
| type CounterFunc interface { | ||||
| 	Metric | ||||
| 	Collector | ||||
| } | ||||
|  | ||||
| // NewCounterFunc creates a new CounterFunc based on the provided | ||||
| // CounterOpts. The value reported is determined by calling the given function | ||||
| // from within the Write method. Take into account that metric collection may | ||||
| // happen concurrently. If that results in concurrent calls to Write, like in | ||||
| // the case where a CounterFunc is directly registered with Prometheus, the | ||||
| // provided function must be concurrency-safe. The function should also honor | ||||
| // the contract for a Counter (values only go up, not down), but compliance will | ||||
| // not be checked. | ||||
| // | ||||
| // Check out the ExampleGaugeFunc examples for the similar GaugeFunc. | ||||
| func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { | ||||
| 	return newValueFunc(NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		nil, | ||||
| 		opts.ConstLabels, | ||||
| 	), CounterValue, function) | ||||
| } | ||||
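
A short usage sketch for the CounterVec API removed above; the metric name, label names, and values are illustrative only.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A counter vector partitioned by status code and HTTP method.
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests processed, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// Label values in the same order as the label names above.
	httpReqs.WithLabelValues("404", "GET").Inc()

	// Equivalent lookup by label name: more verbose, but not order-sensitive.
	httpReqs.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(42)

	// Pre-set ("curry") a label for a code path that only ever serves GETs.
	getOnly := httpReqs.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("200").Inc()
}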
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/desc.go  (186 lines, generated, vendored)
| @ -1,186 +0,0 @@ | ||||
| // Copyright 2016 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/cespare/xxhash/v2" | ||||
| 	//lint:ignore SA1019 Need to keep deprecated package for compatibility. | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/prometheus/common/model" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // Desc is the descriptor used by every Prometheus Metric. It is essentially | ||||
| // the immutable meta-data of a Metric. The normal Metric implementations | ||||
| // included in this package manage their Desc under the hood. Users only have to | ||||
| // deal with Desc if they use advanced features like the ExpvarCollector or | ||||
| // custom Collectors and Metrics. | ||||
| // | ||||
| // Descriptors registered with the same registry have to fulfill certain | ||||
| // consistency and uniqueness criteria if they share the same fully-qualified | ||||
| // name: They must have the same help string and the same label names (aka label | ||||
| // dimensions) in each, constLabels and variableLabels, but they must differ in | ||||
| // the values of the constLabels. | ||||
| // | ||||
| // Descriptors that share the same fully-qualified names and the same label | ||||
| // values of their constLabels are considered equal. | ||||
| // | ||||
| // Use NewDesc to create new Desc instances. | ||||
| type Desc struct { | ||||
| 	// fqName has been built from Namespace, Subsystem, and Name. | ||||
| 	fqName string | ||||
| 	// help provides some helpful information about this metric. | ||||
| 	help string | ||||
| 	// constLabelPairs contains precalculated DTO label pairs based on | ||||
| 	// the constant labels. | ||||
| 	constLabelPairs []*dto.LabelPair | ||||
| 	// variableLabels contains names of labels for which the metric | ||||
| 	// maintains variable values. | ||||
| 	variableLabels []string | ||||
| 	// id is a hash of the values of the ConstLabels and fqName. This | ||||
| 	// must be unique among all registered descriptors and can therefore be | ||||
| 	// used as an identifier of the descriptor. | ||||
| 	id uint64 | ||||
| 	// dimHash is a hash of the label names (preset and variable) and the | ||||
| 	// Help string. Each Desc with the same fqName must have the same | ||||
| 	// dimHash. | ||||
| 	dimHash uint64 | ||||
| 	// err is an error that occurred during construction. It is reported on | ||||
| 	// registration time. | ||||
| 	err error | ||||
| } | ||||
|  | ||||
| // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc | ||||
| // and will be reported on registration time. variableLabels and constLabels can | ||||
| // be nil if no such labels should be set. fqName must not be empty. | ||||
| // | ||||
| // variableLabels only contain the label names. Their label values are variable | ||||
| // and therefore not part of the Desc. (They are managed within the Metric.) | ||||
| // | ||||
| // For constLabels, the label values are constant. Therefore, they are fully | ||||
| // specified in the Desc. See the Collector example for a usage pattern. | ||||
| func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { | ||||
| 	d := &Desc{ | ||||
| 		fqName:         fqName, | ||||
| 		help:           help, | ||||
| 		variableLabels: variableLabels, | ||||
| 	} | ||||
| 	if !model.IsValidMetricName(model.LabelValue(fqName)) { | ||||
| 		d.err = fmt.Errorf("%q is not a valid metric name", fqName) | ||||
| 		return d | ||||
| 	} | ||||
| 	// labelValues contains the label values of const labels (in order of | ||||
| 	// their sorted label names) plus the fqName (at position 0). | ||||
| 	labelValues := make([]string, 1, len(constLabels)+1) | ||||
| 	labelValues[0] = fqName | ||||
| 	labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) | ||||
| 	labelNameSet := map[string]struct{}{} | ||||
| 	// First add only the const label names and sort them... | ||||
| 	for labelName := range constLabels { | ||||
| 		if !checkLabelName(labelName) { | ||||
| 			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) | ||||
| 			return d | ||||
| 		} | ||||
| 		labelNames = append(labelNames, labelName) | ||||
| 		labelNameSet[labelName] = struct{}{} | ||||
| 	} | ||||
| 	sort.Strings(labelNames) | ||||
| 	// ... so that we can now add const label values in the order of their names. | ||||
| 	for _, labelName := range labelNames { | ||||
| 		labelValues = append(labelValues, constLabels[labelName]) | ||||
| 	} | ||||
| 	// Validate the const label values. They can't have a wrong cardinality, so | ||||
| 	// use len(labelValues) as expectedNumberOfValues. | ||||
| 	if err := validateLabelValues(labelValues, len(labelValues)); err != nil { | ||||
| 		d.err = err | ||||
| 		return d | ||||
| 	} | ||||
| 	// Now add the variable label names, but prefix them with something that | ||||
| 	// cannot be in a regular label name. That prevents matching the label | ||||
| 	// dimension with a different mix between preset and variable labels. | ||||
| 	for _, labelName := range variableLabels { | ||||
| 		if !checkLabelName(labelName) { | ||||
| 			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) | ||||
| 			return d | ||||
| 		} | ||||
| 		labelNames = append(labelNames, "$"+labelName) | ||||
| 		labelNameSet[labelName] = struct{}{} | ||||
| 	} | ||||
| 	if len(labelNames) != len(labelNameSet) { | ||||
| 		d.err = errors.New("duplicate label names") | ||||
| 		return d | ||||
| 	} | ||||
|  | ||||
| 	xxh := xxhash.New() | ||||
| 	for _, val := range labelValues { | ||||
| 		xxh.WriteString(val) | ||||
| 		xxh.Write(separatorByteSlice) | ||||
| 	} | ||||
| 	d.id = xxh.Sum64() | ||||
| 	// Sort labelNames so that order doesn't matter for the hash. | ||||
| 	sort.Strings(labelNames) | ||||
| 	// Now hash together (in this order) the help string and the sorted | ||||
| 	// label names. | ||||
| 	xxh.Reset() | ||||
| 	xxh.WriteString(help) | ||||
| 	xxh.Write(separatorByteSlice) | ||||
| 	for _, labelName := range labelNames { | ||||
| 		xxh.WriteString(labelName) | ||||
| 		xxh.Write(separatorByteSlice) | ||||
| 	} | ||||
| 	d.dimHash = xxh.Sum64() | ||||
|  | ||||
| 	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) | ||||
| 	for n, v := range constLabels { | ||||
| 		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ | ||||
| 			Name:  proto.String(n), | ||||
| 			Value: proto.String(v), | ||||
| 		}) | ||||
| 	} | ||||
| 	sort.Sort(labelPairSorter(d.constLabelPairs)) | ||||
| 	return d | ||||
| } | ||||
|  | ||||
| // NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the | ||||
| // provided error set. If a collector returning such a descriptor is registered, | ||||
| // registration will fail with the provided error. NewInvalidDesc can be used by | ||||
| // a Collector to signal inability to describe itself. | ||||
| func NewInvalidDesc(err error) *Desc { | ||||
| 	return &Desc{ | ||||
| 		err: err, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (d *Desc) String() string { | ||||
| 	lpStrings := make([]string, 0, len(d.constLabelPairs)) | ||||
| 	for _, lp := range d.constLabelPairs { | ||||
| 		lpStrings = append( | ||||
| 			lpStrings, | ||||
| 			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), | ||||
| 		) | ||||
| 	} | ||||
| 	return fmt.Sprintf( | ||||
| 		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", | ||||
| 		d.fqName, | ||||
| 		d.help, | ||||
| 		strings.Join(lpStrings, ","), | ||||
| 		d.variableLabels, | ||||
| 	) | ||||
| } | ||||
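
For illustration, building a Desc as described in the NewDesc comment above and stamping out a throw-away const metric from it, the pattern used by custom Collectors; the metric name, labels, and value are hypothetical.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// One variable label ("device") plus one constant label, typically built
	// once in a custom Collector's constructor.
	desc := prometheus.NewDesc(
		"disk_usage_bytes",
		"Disk usage per device.",
		[]string{"device"},
		prometheus.Labels{"host": "example-host"},
	)

	// A throw-away metric created from the Desc, as done at collect time.
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1.5e9, "/dev/sda")
	_ = m
}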
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/doc.go  (199 lines, generated, vendored)
| @ -1,199 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // Package prometheus is the core instrumentation package. It provides metrics | ||||
| // primitives to instrument code for monitoring. It also offers a registry for | ||||
| // metrics. Sub-packages allow to expose the registered metrics via HTTP | ||||
| // (package promhttp) or push them to a Pushgateway (package push). There is | ||||
| // also a sub-package promauto, which provides metrics constructors with | ||||
| // automatic registration. | ||||
| // | ||||
| // All exported functions and methods are safe to be used concurrently unless | ||||
| // specified otherwise. | ||||
| // | ||||
| // A Basic Example | ||||
| // | ||||
| // As a starting point, a very basic usage example: | ||||
| // | ||||
| //    package main | ||||
| // | ||||
| //    import ( | ||||
| //    	"log" | ||||
| //    	"net/http" | ||||
| // | ||||
| //    	"github.com/prometheus/client_golang/prometheus" | ||||
| //    	"github.com/prometheus/client_golang/prometheus/promhttp" | ||||
| //    ) | ||||
| // | ||||
| //    var ( | ||||
| //    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ | ||||
| //    		Name: "cpu_temperature_celsius", | ||||
| //    		Help: "Current temperature of the CPU.", | ||||
| //    	}) | ||||
| //    	hdFailures = prometheus.NewCounterVec( | ||||
| //    		prometheus.CounterOpts{ | ||||
| //    			Name: "hd_errors_total", | ||||
| //    			Help: "Number of hard-disk errors.", | ||||
| //    		}, | ||||
| //    		[]string{"device"}, | ||||
| //    	) | ||||
| //    ) | ||||
| // | ||||
| //    func init() { | ||||
| //    	// Metrics have to be registered to be exposed: | ||||
| //    	prometheus.MustRegister(cpuTemp) | ||||
| //    	prometheus.MustRegister(hdFailures) | ||||
| //    } | ||||
| // | ||||
| //    func main() { | ||||
| //    	cpuTemp.Set(65.3) | ||||
| //    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() | ||||
| // | ||||
| //    	// The Handler function provides a default handler to expose metrics | ||||
| //    	// via an HTTP server. "/metrics" is the usual endpoint for that. | ||||
| //    	http.Handle("/metrics", promhttp.Handler()) | ||||
| //    	log.Fatal(http.ListenAndServe(":8080", nil)) | ||||
| //    } | ||||
| // | ||||
| // | ||||
| // This is a complete program that exports two metrics, a Gauge and a Counter, | ||||
| // the latter with a label attached to turn it into a (one-dimensional) vector. | ||||
| // | ||||
| // Metrics | ||||
| // | ||||
| // The number of exported identifiers in this package might appear a bit | ||||
| // overwhelming. However, in addition to the basic plumbing shown in the example | ||||
| // above, you only need to understand the different metric types and their | ||||
| // vector versions for basic usage. Furthermore, if you are not concerned with | ||||
| // fine-grained control of when and how to register metrics with the registry, | ||||
| // have a look at the promauto package, which will effectively allow you to | ||||
| // ignore registration altogether in simple cases. | ||||
| // | ||||
| // Above, you have already touched the Counter and the Gauge. There are two more | ||||
| // advanced metric types: the Summary and Histogram. A more thorough description | ||||
| // of those four metric types can be found in the Prometheus docs: | ||||
| // https://prometheus.io/docs/concepts/metric_types/ | ||||
| // | ||||
| // In addition to the fundamental metric types Gauge, Counter, Summary, and | ||||
| // Histogram, a very important part of the Prometheus data model is the | ||||
| // partitioning of samples along dimensions called labels, which results in | ||||
| // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, | ||||
| // and HistogramVec. | ||||
| // | ||||
| // While only the fundamental metric types implement the Metric interface, both | ||||
| // the metrics and their vector versions implement the Collector interface. A | ||||
| // Collector manages the collection of a number of Metrics, but for convenience, | ||||
| // a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and | ||||
| // Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, | ||||
| // and HistogramVec are not. | ||||
| // | ||||
| // To create instances of Metrics and their vector versions, you need a suitable | ||||
| // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. | ||||
| // | ||||
| // Custom Collectors and constant Metrics | ||||
| // | ||||
| // While you could create your own implementations of Metric, most likely you | ||||
| // will only ever implement the Collector interface on your own. At a first | ||||
| // glance, a custom Collector seems handy to bundle Metrics for common | ||||
| // registration (with the prime example of the different metric vectors above, | ||||
| // which bundle all the metrics of the same name but with different labels). | ||||
| // | ||||
| // There is a more involved use case, too: If you already have metrics | ||||
| // available, created outside of the Prometheus context, you don't need the | ||||
| // interface of the various Metric types. You essentially want to mirror the | ||||
| // existing numbers into Prometheus Metrics during collection. An own | ||||
| // implementation of the Collector interface is perfect for that. You can create | ||||
| // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and | ||||
| // NewConstSummary (and their respective Must… versions). NewConstMetric is used | ||||
| // for all metric types with just a float64 as their value: Counter, Gauge, and | ||||
| // a special “type” called Untyped. Use the latter if you are not sure if the | ||||
| // mirrored metric is a Counter or a Gauge. Creation of the Metric instance | ||||
| // happens in the Collect method. The Describe method has to return separate | ||||
| // Desc instances, representative of the “throw-away” metrics to be created | ||||
| // later.  NewDesc comes in handy to create those Desc instances. Alternatively, | ||||
| // you could return no Desc at all, which will mark the Collector “unchecked”. | ||||
| // No checks are performed at registration time, but metric consistency will | ||||
| // still be ensured at scrape time, i.e. any inconsistencies will lead to scrape | ||||
| // errors. Thus, with unchecked Collectors, the responsibility to not collect | ||||
| // metrics that lead to inconsistencies in the total scrape result lies with the | ||||
| // implementer of the Collector. While this is not a desirable state, it is | ||||
| // sometimes necessary. The typical use case is a situation where the exact | ||||
| // metrics to be returned by a Collector cannot be predicted at registration | ||||
| // time, but the implementer has sufficient knowledge of the whole system to | ||||
| // guarantee metric consistency. | ||||
| // | ||||
| // The Collector example illustrates the use case. You can also look at the | ||||
| // source code of the processCollector (mirroring process metrics), the | ||||
| // goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar | ||||
| // metrics) as examples that are used in this package itself. | ||||
| // | ||||
| // If you just need to call a function to get a single float value to collect as | ||||
| // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting | ||||
| // shortcuts. | ||||
| // | ||||
| // Advanced Uses of the Registry | ||||
| // | ||||
| // While MustRegister is the by far most common way of registering a Collector, | ||||
| // sometimes you might want to handle the errors the registration might cause. | ||||
| // As suggested by the name, MustRegister panics if an error occurs. With the | ||||
| // Register function, the error is returned and can be handled. | ||||
| // | ||||
| // An error is returned if the registered Collector is incompatible or | ||||
| // inconsistent with already registered metrics. The registry aims for | ||||
| // consistency of the collected metrics according to the Prometheus data model. | ||||
| // Inconsistencies are ideally detected at registration time, not at collect | ||||
| // time. The former will usually be detected at start-up time of a program, | ||||
| // while the latter will only happen at scrape time, possibly not even on the | ||||
| // first scrape if the inconsistency only becomes relevant later. That is the | ||||
| // main reason why a Collector and a Metric have to describe themselves to the | ||||
| // registry. | ||||
| // | ||||
| // So far, everything we did operated on the so-called default registry, as it | ||||
| // can be found in the global DefaultRegisterer variable. With NewRegistry, you | ||||
| // can create a custom registry, or you can even implement the Registerer or | ||||
| // Gatherer interfaces yourself. The methods Register and Unregister work in the | ||||
| // same way on a custom registry as the global functions Register and Unregister | ||||
| // on the default registry. | ||||
| // | ||||
| // There are a number of uses for custom registries: You can use registries with | ||||
| // special properties, see NewPedanticRegistry. You can avoid global state, as | ||||
| // it is imposed by the DefaultRegisterer. You can use multiple registries at | ||||
| // the same time to expose different metrics in different ways.  You can use | ||||
| // separate registries for testing purposes. | ||||
| // | ||||
| // Also note that the DefaultRegisterer comes registered with a Collector for Go | ||||
| // runtime metrics (via NewGoCollector) and a Collector for process metrics (via | ||||
| // NewProcessCollector). With a custom registry, you are in control and decide | ||||
| // yourself about the Collectors to register. | ||||
| // | ||||
| // HTTP Exposition | ||||
| // | ||||
| // The Registry implements the Gatherer interface. The caller of the Gather | ||||
| // method can then expose the gathered metrics in some way. Usually, the metrics | ||||
| // are served via HTTP on the /metrics endpoint. That's happening in the example | ||||
| // above. The tools to expose metrics via HTTP are in the promhttp sub-package. | ||||
| // | ||||
| // Pushing to the Pushgateway | ||||
| // | ||||
| // Function for pushing to the Pushgateway can be found in the push sub-package. | ||||
| // | ||||
| // Graphite Bridge | ||||
| // | ||||
| // Functions and examples to push metrics from a Gatherer to Graphite can be | ||||
| // found in the graphite sub-package. | ||||
| // | ||||
| // Other Means of Exposition | ||||
| // | ||||
| // More ways of exposing metrics can easily be added by following the approaches | ||||
| // of the existing implementations. | ||||
| package prometheus | ||||
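
The "Advanced Uses of the Registry" part of the package documentation above mentions custom registries; a brief sketch of that pattern, assuming the promhttp sub-package is also available as a module dependency (metric name and listen address are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry avoids the global default registry and its
	// pre-registered Go and process collectors.
	reg := prometheus.NewRegistry()

	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "in_flight_requests",
		Help: "Requests currently being served.",
	})
	if err := reg.Register(inFlight); err != nil {
		log.Fatal(err)
	}

	// Expose only what this particular registry gathers.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}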
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go  (119 lines, generated, vendored)
| @ -1,119 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"expvar" | ||||
| ) | ||||
|  | ||||
| type expvarCollector struct { | ||||
| 	exports map[string]*Desc | ||||
| } | ||||
|  | ||||
| // NewExpvarCollector returns a newly allocated expvar Collector that still has | ||||
| // to be registered with a Prometheus registry. | ||||
| // | ||||
| // An expvar Collector collects metrics from the expvar interface. It provides a | ||||
| // quick way to expose numeric values that are already exported via expvar as | ||||
| // Prometheus metrics. Note that the data models of expvar and Prometheus are | ||||
| // fundamentally different, and that the expvar Collector is inherently slower | ||||
| // than native Prometheus metrics. Thus, the expvar Collector is probably great | ||||
| // for experiments and prototying, but you should seriously consider a more | ||||
| // direct implementation of Prometheus metrics for monitoring production | ||||
| // systems. | ||||
| // | ||||
| // The exports map has the following meaning: | ||||
| // | ||||
| // The keys in the map correspond to expvar keys, i.e. for every expvar key you | ||||
| // want to export as Prometheus metric, you need an entry in the exports | ||||
| // map. The descriptor mapped to each key describes how to export the expvar | ||||
| // value. It defines the name and the help string of the Prometheus metric | ||||
| // proxying the expvar value. The type will always be Untyped. | ||||
| // | ||||
| // For descriptors without variable labels, the expvar value must be a number or | ||||
| // a bool. The number is then directly exported as the Prometheus sample | ||||
| // value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values | ||||
| // that are not numbers or bools are silently ignored. | ||||
| // | ||||
| // If the descriptor has one variable label, the expvar value must be an expvar | ||||
| // map. The keys in the expvar map become the various values of the one | ||||
| // Prometheus label. The values in the expvar map must be numbers or bools again | ||||
| // as above. | ||||
| // | ||||
| // For descriptors with more than one variable label, the expvar must be a | ||||
| // nested expvar map, i.e. where the values of the topmost map are maps again | ||||
| // etc. until a depth is reached that corresponds to the number of labels. The | ||||
| // leaves of that structure must be numbers or bools as above to serve as the | ||||
| // sample values. | ||||
| // | ||||
| // Anything that does not fit into the scheme above is silently ignored. | ||||
| func NewExpvarCollector(exports map[string]*Desc) Collector { | ||||
| 	return &expvarCollector{ | ||||
| 		exports: exports, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Describe implements Collector. | ||||
| func (e *expvarCollector) Describe(ch chan<- *Desc) { | ||||
| 	for _, desc := range e.exports { | ||||
| 		ch <- desc | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Collect implements Collector. | ||||
| func (e *expvarCollector) Collect(ch chan<- Metric) { | ||||
| 	for name, desc := range e.exports { | ||||
| 		var m Metric | ||||
| 		expVar := expvar.Get(name) | ||||
| 		if expVar == nil { | ||||
| 			continue | ||||
| 		} | ||||
| 		var v interface{} | ||||
| 		labels := make([]string, len(desc.variableLabels)) | ||||
| 		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { | ||||
| 			ch <- NewInvalidMetric(desc, err) | ||||
| 			continue | ||||
| 		} | ||||
| 		var processValue func(v interface{}, i int) | ||||
| 		processValue = func(v interface{}, i int) { | ||||
| 			if i >= len(labels) { | ||||
| 				copiedLabels := append(make([]string, 0, len(labels)), labels...) | ||||
| 				switch v := v.(type) { | ||||
| 				case float64: | ||||
| 					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) | ||||
| 				case bool: | ||||
| 					if v { | ||||
| 						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) | ||||
| 					} else { | ||||
| 						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) | ||||
| 					} | ||||
| 				default: | ||||
| 					return | ||||
| 				} | ||||
| 				ch <- m | ||||
| 				return | ||||
| 			} | ||||
| 			vm, ok := v.(map[string]interface{}) | ||||
| 			if !ok { | ||||
| 				return | ||||
| 			} | ||||
| 			for lv, val := range vm { | ||||
| 				labels[i] = lv | ||||
| 				processValue(val, i+1) | ||||
| 			} | ||||
| 		} | ||||
| 		processValue(v, 0) | ||||
| 	} | ||||
| } | ||||
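
A small sketch of the exports map described above, mapping a single expvar map to a Prometheus metric with one variable label; the expvar key, metric name, and label name are made up for illustration.

package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A hypothetical expvar map: hit counts keyed by pool name.
	poolHits := expvar.NewMap("pool_hits")
	poolHits.Add("small", 3)
	poolHits.Add("large", 1)

	// One Desc per expvar key; the single variable label receives the keys
	// of the expvar map ("small", "large") as its values.
	c := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"pool_hits": prometheus.NewDesc(
			"pool_hits_total",
			"Pool hits exported via expvar.",
			[]string{"pool"},
			nil,
		),
	})
	prometheus.MustRegister(c)
}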
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/fnv.go  (42 lines, generated, vendored)
| @ -1,42 +0,0 @@ | ||||
| // Copyright 2018 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| // Inline and byte-free variant of hash/fnv's fnv64a. | ||||
|  | ||||
| const ( | ||||
| 	offset64 = 14695981039346656037 | ||||
| 	prime64  = 1099511628211 | ||||
| ) | ||||
|  | ||||
| // hashNew initializes a new fnv64a hash value. | ||||
| func hashNew() uint64 { | ||||
| 	return offset64 | ||||
| } | ||||
|  | ||||
| // hashAdd adds a string to a fnv64a hash value, returning the updated hash. | ||||
| func hashAdd(h uint64, s string) uint64 { | ||||
| 	for i := 0; i < len(s); i++ { | ||||
| 		h ^= uint64(s[i]) | ||||
| 		h *= prime64 | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| // hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. | ||||
| func hashAddByte(h uint64, b byte) uint64 { | ||||
| 	h ^= uint64(b) | ||||
| 	h *= prime64 | ||||
| 	return h | ||||
| } | ||||
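
The fnv helpers above are package-internal, so they cannot be called from outside; as a sanity check of the algorithm they inline, here is a standalone restatement of the same fnv64a fold cross-checked against the standard library's hash/fnv (constants copied from the file above).

package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd folds a string into an fnv64a hash value, mirroring the vendored helper.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	h := hashAdd(offset64, "http_requests_total")

	// Cross-check against the standard library's FNV-1a implementation.
	std := fnv.New64a()
	std.Write([]byte("http_requests_total"))
	fmt.Println(h == std.Sum64()) // prints true
}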
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/gauge.go  (289 lines, generated, vendored)
| @ -1,289 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"math" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // Gauge is a Metric that represents a single numerical value that can | ||||
| // arbitrarily go up and down. | ||||
| // | ||||
| // A Gauge is typically used for measured values like temperatures or current | ||||
| // memory usage, but also "counts" that can go up and down, like the number of | ||||
| // running goroutines. | ||||
| // | ||||
| // To create Gauge instances, use NewGauge. | ||||
| type Gauge interface { | ||||
| 	Metric | ||||
| 	Collector | ||||
|  | ||||
| 	// Set sets the Gauge to an arbitrary value. | ||||
| 	Set(float64) | ||||
| 	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary | ||||
| 	// values. | ||||
| 	Inc() | ||||
| 	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary | ||||
| 	// values. | ||||
| 	Dec() | ||||
| 	// Add adds the given value to the Gauge. (The value can be negative, | ||||
| 	// resulting in a decrease of the Gauge.) | ||||
| 	Add(float64) | ||||
| 	// Sub subtracts the given value from the Gauge. (The value can be | ||||
| 	// negative, resulting in an increase of the Gauge.) | ||||
| 	Sub(float64) | ||||
|  | ||||
| 	// SetToCurrentTime sets the Gauge to the current Unix time in seconds. | ||||
| 	SetToCurrentTime() | ||||
| } | ||||
|  | ||||
| // GaugeOpts is an alias for Opts. See there for doc comments. | ||||
| type GaugeOpts Opts | ||||
|  | ||||
| // NewGauge creates a new Gauge based on the provided GaugeOpts. | ||||
| // | ||||
| // The returned implementation is optimized for a fast Set method. If you have a | ||||
| // choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick | ||||
| // the former. For example, the Inc method of the returned Gauge is slower than | ||||
| // the Inc method of a Counter returned by NewCounter. This matches the typical | ||||
| // scenarios for Gauges and Counters, where the former tends to be Set-heavy and | ||||
| // the latter Inc-heavy. | ||||
| func NewGauge(opts GaugeOpts) Gauge { | ||||
| 	desc := NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		nil, | ||||
| 		opts.ConstLabels, | ||||
| 	) | ||||
| 	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} | ||||
| 	result.init(result) // Init self-collection. | ||||
| 	return result | ||||
| } | ||||
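A brief usage sketch with a hypothetical metric name and values, following the doc comment's advice to prefer Set when there is a choice.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Hypothetical gauge tracking a queue depth.
	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "worker_queue_depth",
		Help: "Number of jobs currently waiting in the queue.",
	})
	prometheus.MustRegister(queueDepth)

	queueDepth.Set(17) // fast path, per the comment above
	queueDepth.Inc()   // 18
	queueDepth.Sub(3)  // 15
	queueDepth.Dec()   // 14
}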
|  | ||||
| type gauge struct { | ||||
| 	// valBits contains the bits of the represented float64 value. It has | ||||
| 	// to go first in the struct to guarantee alignment for atomic | ||||
| 	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG | ||||
| 	valBits uint64 | ||||
|  | ||||
| 	selfCollector | ||||
|  | ||||
| 	desc       *Desc | ||||
| 	labelPairs []*dto.LabelPair | ||||
| } | ||||
|  | ||||
| func (g *gauge) Desc() *Desc { | ||||
| 	return g.desc | ||||
| } | ||||
|  | ||||
| func (g *gauge) Set(val float64) { | ||||
| 	atomic.StoreUint64(&g.valBits, math.Float64bits(val)) | ||||
| } | ||||
|  | ||||
| func (g *gauge) SetToCurrentTime() { | ||||
| 	g.Set(float64(time.Now().UnixNano()) / 1e9) | ||||
| } | ||||
|  | ||||
| func (g *gauge) Inc() { | ||||
| 	g.Add(1) | ||||
| } | ||||
|  | ||||
| func (g *gauge) Dec() { | ||||
| 	g.Add(-1) | ||||
| } | ||||
|  | ||||
| func (g *gauge) Add(val float64) { | ||||
| 	for { | ||||
| 		oldBits := atomic.LoadUint64(&g.valBits) | ||||
| 		newBits := math.Float64bits(math.Float64frombits(oldBits) + val) | ||||
| 		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (g *gauge) Sub(val float64) { | ||||
| 	g.Add(val * -1) | ||||
| } | ||||
|  | ||||
| func (g *gauge) Write(out *dto.Metric) error { | ||||
| 	val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) | ||||
| 	return populateMetric(GaugeValue, val, g.labelPairs, nil, out) | ||||
| } | ||||
|  | ||||
| // GaugeVec is a Collector that bundles a set of Gauges that all share the same | ||||
| // Desc, but have different values for their variable labels. This is used if | ||||
| // you want to count the same thing partitioned by various dimensions | ||||
| // (e.g. number of operations queued, partitioned by user and operation | ||||
| // type). Create instances with NewGaugeVec. | ||||
| type GaugeVec struct { | ||||
| 	*MetricVec | ||||
| } | ||||
|  | ||||
| // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and | ||||
| // partitioned by the given label names. | ||||
| func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { | ||||
| 	desc := NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		labelNames, | ||||
| 		opts.ConstLabels, | ||||
| 	) | ||||
| 	return &GaugeVec{ | ||||
| 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { | ||||
| 			if len(lvs) != len(desc.variableLabels) { | ||||
| 				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) | ||||
| 			} | ||||
| 			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} | ||||
| 			result.init(result) // Init self-collection. | ||||
| 			return result | ||||
| 		}), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // GetMetricWithLabelValues returns the Gauge for the given slice of label | ||||
| // values (same order as the variable labels in Desc). If that combination of | ||||
| // label values is accessed for the first time, a new Gauge is created. | ||||
| // | ||||
| // It is possible to call this method without using the returned Gauge to only | ||||
| // create the new Gauge but leave it at its starting value 0. See also the | ||||
| // SummaryVec example. | ||||
| // | ||||
| // Keeping the Gauge for later use is possible (and should be considered if | ||||
| // performance is critical), but keep in mind that Reset, DeleteLabelValues and | ||||
| // Delete can be used to delete the Gauge from the GaugeVec. In that case, the | ||||
| // Gauge will still exist, but it will not be exported anymore, even if a | ||||
| // Gauge with the same label values is created later. See also the CounterVec | ||||
| // example. | ||||
| // | ||||
| // An error is returned if the number of label values is not the same as the | ||||
| // number of variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // Note that for more than one label value, this method is prone to mistakes | ||||
| // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | ||||
| // an alternative to avoid that type of mistake. For higher label numbers, the | ||||
| // latter has a much more readable (albeit more verbose) syntax, but it comes | ||||
| // with a performance overhead (for creating and processing the Labels map). | ||||
| func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Gauge), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // GetMetricWith returns the Gauge for the given Labels map (the label names | ||||
| // must match those of the variable labels in Desc). If that label map is | ||||
| // accessed for the first time, a new Gauge is created. Implications of | ||||
| // creating a Gauge without using it and keeping the Gauge for later use are | ||||
| // the same as for GetMetricWithLabelValues. | ||||
| // | ||||
| // An error is returned if the number and names of the Labels are inconsistent | ||||
| // with those of the variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // This method is used for the same purpose as | ||||
| // GetMetricWithLabelValues(...string). See there for pros and cons of the two | ||||
| // methods. | ||||
| func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWith(labels) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Gauge), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // WithLabelValues works as GetMetricWithLabelValues, but panics where | ||||
| // GetMetricWithLabelValues would have returned an error. Not returning an | ||||
| // error allows shortcuts like | ||||
| //     myVec.WithLabelValues("404", "GET").Add(42) | ||||
| func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { | ||||
| 	g, err := v.GetMetricWithLabelValues(lvs...) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return g | ||||
| } | ||||
|  | ||||
| // With works as GetMetricWith, but panics where GetMetricWith would have | ||||
| // returned an error. Not returning an error allows shortcuts like | ||||
| //     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) | ||||
| func (v *GaugeVec) With(labels Labels) Gauge { | ||||
| 	g, err := v.GetMetricWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return g | ||||
| } | ||||
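A sketch contrasting the three accessors documented above; the metric name, label names, and label values are assumptions.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	inFlight := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "http_in_flight_requests",
		Help: "In-flight requests, partitioned by handler and method.",
	}, []string{"handler", "method"})
	prometheus.MustRegister(inFlight)

	// Panicking shortcut; value order must match the label names above.
	inFlight.WithLabelValues("/api", "GET").Inc()

	// Map form avoids argument-order mistakes at the cost of a map allocation.
	inFlight.With(prometheus.Labels{"handler": "/api", "method": "GET"}).Dec()

	// Error-returning variant, useful when label values come from untrusted input.
	if g, err := inFlight.GetMetricWithLabelValues("/api", "POST"); err == nil {
		g.Set(0)
	}
}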
|  | ||||
| // CurryWith returns a vector curried with the provided labels, i.e. the | ||||
| // returned vector has those labels pre-set for all labeled operations performed | ||||
| // on it. The cardinality of the curried vector is reduced accordingly. The | ||||
| // order of the remaining labels stays the same (just with the curried labels | ||||
| // taken out of the sequence – which is relevant for the | ||||
| // (GetMetric)WithLabelValues methods). It is possible to curry a curried | ||||
| // vector, but only with labels not yet used for currying before. | ||||
| // | ||||
| // The metrics contained in the GaugeVec are shared between the curried and | ||||
| // uncurried vectors. They are just accessed differently. Curried and uncurried | ||||
| // vectors behave identically in terms of collection. Only one must be | ||||
| // registered with a given registry (usually the uncurried version). The Reset | ||||
| // method deletes all metrics, even if called on a curried vector. | ||||
| func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { | ||||
| 	vec, err := v.MetricVec.CurryWith(labels) | ||||
| 	if vec != nil { | ||||
| 		return &GaugeVec{vec}, err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // MustCurryWith works as CurryWith but panics where CurryWith would have | ||||
| // returned an error. | ||||
| func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { | ||||
| 	vec, err := v.CurryWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return vec | ||||
| } | ||||
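A sketch of currying as described above, with invented metric and label names.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	temps := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "room_temperature_celsius",
		Help: "Temperature by building and room.",
	}, []string{"building", "room"})
	prometheus.MustRegister(temps)

	// All operations on hq have building="hq" pre-set; only "room" remains.
	hq := temps.MustCurryWith(prometheus.Labels{"building": "hq"})
	hq.WithLabelValues("kitchen").Set(21.5)

	// Curried and uncurried vectors share the same underlying metrics.
	temps.WithLabelValues("hq", "kitchen").Add(0.5)
}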
|  | ||||
| // GaugeFunc is a Gauge whose value is determined at collect time by calling a | ||||
| // provided function. | ||||
| // | ||||
| // To create GaugeFunc instances, use NewGaugeFunc. | ||||
| type GaugeFunc interface { | ||||
| 	Metric | ||||
| 	Collector | ||||
| } | ||||
|  | ||||
| // NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The | ||||
| // value reported is determined by calling the given function from within the | ||||
| // Write method. Take into account that metric collection may happen | ||||
| // concurrently. Therefore, it must be safe to call the provided function | ||||
| // concurrently. | ||||
| // | ||||
| // NewGaugeFunc is a good way to create an “info” style metric with a constant | ||||
| // value of 1. Example: | ||||
| // https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 | ||||
| func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { | ||||
| 	return newValueFunc(NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		nil, | ||||
| 		opts.ConstLabels, | ||||
| 	), GaugeValue, function) | ||||
| } | ||||
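A sketch of a GaugeFunc sampled at collect time; the metric name is an assumption, and the callback must be safe for concurrent calls, as noted above.

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The function runs whenever the metric is collected.
	goroutines := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "app_goroutines",
		Help: "Current number of goroutines, sampled at scrape time.",
	}, func() float64 { return float64(runtime.NumGoroutine()) })
	prometheus.MustRegister(goroutines)
}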
							
								
								
									
397 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go generated vendored
| @ -1,397 +0,0 @@ | ||||
| // Copyright 2018 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"runtime" | ||||
| 	"runtime/debug" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| type goCollector struct { | ||||
| 	goroutinesDesc *Desc | ||||
| 	threadsDesc    *Desc | ||||
| 	gcDesc         *Desc | ||||
| 	goInfoDesc     *Desc | ||||
|  | ||||
| 	// ms... are memstats related. | ||||
| 	msLast          *runtime.MemStats // Previously collected memstats. | ||||
| 	msLastTimestamp time.Time | ||||
| 	msMtx           sync.Mutex // Protects msLast and msLastTimestamp. | ||||
| 	msMetrics       memStatsMetrics | ||||
| 	msRead          func(*runtime.MemStats) // For mocking in tests. | ||||
| 	msMaxWait       time.Duration           // Wait time for fresh memstats. | ||||
| 	msMaxAge        time.Duration           // Maximum allowed age of old memstats. | ||||
| } | ||||
|  | ||||
| // NewGoCollector returns a collector that exports metrics about the current Go | ||||
| // process. This includes memory stats. To collect those, runtime.ReadMemStats | ||||
| // is called. This requires a “stop the world” pause, which usually only happens | ||||
| // during garbage collection (GC). Take the following implications into account when | ||||
| // deciding whether to use the Go collector: | ||||
| // | ||||
| // 1. The performance impact of stopping the world grows with how frequently | ||||
| // metrics are collected. With Go 1.9 or later, the stop-the-world time per | ||||
| // metrics collection is very short (~25µs), so the impact only matters in | ||||
| // rare cases. With older Go versions, however, the stop-the-world duration | ||||
| // depends on the heap size and can be | ||||
| // quite significant (~1.7 ms/GiB as per | ||||
| // https://go-review.googlesource.com/c/go/+/34937). | ||||
| // | ||||
| // 2. During an ongoing GC, nothing else can stop the world. Therefore, if the | ||||
| // metrics collection happens to coincide with GC, it will only complete after | ||||
| // GC has finished. Usually, GC is fast enough to not cause problems. However, | ||||
| // with a very large heap, GC might take multiple seconds, which is enough to | ||||
| // cause scrape timeouts in common setups. To avoid this problem, the Go | ||||
| // collector will use the memstats from a previous collection if | ||||
| // runtime.ReadMemStats takes more than 1s. However, if there are no previously | ||||
| // collected memstats, or their collection is more than 5m ago, the collection | ||||
| // will block until runtime.ReadMemStats succeeds. | ||||
| // | ||||
| // NOTE: The problem is solved in Go 1.15, see | ||||
| // https://github.com/golang/go/issues/19812 for the related Go issue. | ||||
| func NewGoCollector() Collector { | ||||
| 	return &goCollector{ | ||||
| 		goroutinesDesc: NewDesc( | ||||
| 			"go_goroutines", | ||||
| 			"Number of goroutines that currently exist.", | ||||
| 			nil, nil), | ||||
| 		threadsDesc: NewDesc( | ||||
| 			"go_threads", | ||||
| 			"Number of OS threads created.", | ||||
| 			nil, nil), | ||||
| 		gcDesc: NewDesc( | ||||
| 			"go_gc_duration_seconds", | ||||
| 			"A summary of the pause duration of garbage collection cycles.", | ||||
| 			nil, nil), | ||||
| 		goInfoDesc: NewDesc( | ||||
| 			"go_info", | ||||
| 			"Information about the Go environment.", | ||||
| 			nil, Labels{"version": runtime.Version()}), | ||||
| 		msLast:    &runtime.MemStats{}, | ||||
| 		msRead:    runtime.ReadMemStats, | ||||
| 		msMaxWait: time.Second, | ||||
| 		msMaxAge:  5 * time.Minute, | ||||
| 		msMetrics: memStatsMetrics{ | ||||
| 			{ | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("alloc_bytes"), | ||||
| 					"Number of bytes allocated and still in use.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("alloc_bytes_total"), | ||||
| 					"Total number of bytes allocated, even if freed.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, | ||||
| 				valType: CounterValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("sys_bytes"), | ||||
| 					"Number of bytes obtained from system.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("lookups_total"), | ||||
| 					"Total number of pointer lookups.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, | ||||
| 				valType: CounterValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("mallocs_total"), | ||||
| 					"Total number of mallocs.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, | ||||
| 				valType: CounterValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("frees_total"), | ||||
| 					"Total number of frees.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, | ||||
| 				valType: CounterValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("heap_alloc_bytes"), | ||||
| 					"Number of heap bytes allocated and still in use.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("heap_sys_bytes"), | ||||
| 					"Number of heap bytes obtained from system.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("heap_idle_bytes"), | ||||
| 					"Number of heap bytes waiting to be used.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("heap_inuse_bytes"), | ||||
| 					"Number of heap bytes that are in use.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("heap_released_bytes"), | ||||
| 					"Number of heap bytes released to OS.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("heap_objects"), | ||||
| 					"Number of allocated objects.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("stack_inuse_bytes"), | ||||
| 					"Number of bytes in use by the stack allocator.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("stack_sys_bytes"), | ||||
| 					"Number of bytes obtained from system for stack allocator.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("mspan_inuse_bytes"), | ||||
| 					"Number of bytes in use by mspan structures.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("mspan_sys_bytes"), | ||||
| 					"Number of bytes used for mspan structures obtained from system.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("mcache_inuse_bytes"), | ||||
| 					"Number of bytes in use by mcache structures.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("mcache_sys_bytes"), | ||||
| 					"Number of bytes used for mcache structures obtained from system.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("buck_hash_sys_bytes"), | ||||
| 					"Number of bytes used by the profiling bucket hash table.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("gc_sys_bytes"), | ||||
| 					"Number of bytes used for garbage collection system metadata.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("other_sys_bytes"), | ||||
| 					"Number of bytes used for other system allocations.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("next_gc_bytes"), | ||||
| 					"Number of heap bytes when next garbage collection will take place.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("last_gc_time_seconds"), | ||||
| 					"Number of seconds since 1970 of last garbage collection.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, { | ||||
| 				desc: NewDesc( | ||||
| 					memstatNamespace("gc_cpu_fraction"), | ||||
| 					"The fraction of this program's available CPU time used by the GC since the program started.", | ||||
| 					nil, nil, | ||||
| 				), | ||||
| 				eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, | ||||
| 				valType: GaugeValue, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
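A sketch of exposing just this collector on its own registry; the port and handler path are assumptions. Isolating the collector like this makes the memstats staleness behaviour described above easy to observe.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry containing only the Go runtime collector.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}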
|  | ||||
| func memstatNamespace(s string) string { | ||||
| 	return "go_memstats_" + s | ||||
| } | ||||
|  | ||||
| // Describe returns all descriptions of the collector. | ||||
| func (c *goCollector) Describe(ch chan<- *Desc) { | ||||
| 	ch <- c.goroutinesDesc | ||||
| 	ch <- c.threadsDesc | ||||
| 	ch <- c.gcDesc | ||||
| 	ch <- c.goInfoDesc | ||||
| 	for _, i := range c.msMetrics { | ||||
| 		ch <- i.desc | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Collect returns the current state of all metrics of the collector. | ||||
| func (c *goCollector) Collect(ch chan<- Metric) { | ||||
| 	var ( | ||||
| 		ms   = &runtime.MemStats{} | ||||
| 		done = make(chan struct{}) | ||||
| 	) | ||||
| 	// Start reading memstats first as it might take a while. | ||||
| 	go func() { | ||||
| 		c.msRead(ms) | ||||
| 		c.msMtx.Lock() | ||||
| 		c.msLast = ms | ||||
| 		c.msLastTimestamp = time.Now() | ||||
| 		c.msMtx.Unlock() | ||||
| 		close(done) | ||||
| 	}() | ||||
|  | ||||
| 	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) | ||||
| 	n, _ := runtime.ThreadCreateProfile(nil) | ||||
| 	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) | ||||
|  | ||||
| 	var stats debug.GCStats | ||||
| 	stats.PauseQuantiles = make([]time.Duration, 5) | ||||
| 	debug.ReadGCStats(&stats) | ||||
|  | ||||
| 	quantiles := make(map[float64]float64) | ||||
| 	for idx, pq := range stats.PauseQuantiles[1:] { | ||||
| 		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() | ||||
| 	} | ||||
| 	quantiles[0.0] = stats.PauseQuantiles[0].Seconds() | ||||
| 	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) | ||||
|  | ||||
| 	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) | ||||
|  | ||||
| 	timer := time.NewTimer(c.msMaxWait) | ||||
| 	select { | ||||
| 	case <-done: // Our own ReadMemStats succeeded in time. Use it. | ||||
| 		timer.Stop() // Important for high collection frequencies to not pile up timers. | ||||
| 		c.msCollect(ch, ms) | ||||
| 		return | ||||
| 	case <-timer.C: // Time out, use last memstats if possible. Continue below. | ||||
| 	} | ||||
| 	c.msMtx.Lock() | ||||
| 	if time.Since(c.msLastTimestamp) < c.msMaxAge { | ||||
| 		// Last memstats are recent enough. Collect from them under the lock. | ||||
| 		c.msCollect(ch, c.msLast) | ||||
| 		c.msMtx.Unlock() | ||||
| 		return | ||||
| 	} | ||||
| 	// If we are here, the last memstats are too old or don't exist. We have | ||||
| 	// to wait until our own ReadMemStats finally completes. For that to | ||||
| 	// happen, we have to release the lock. | ||||
| 	c.msMtx.Unlock() | ||||
| 	<-done | ||||
| 	c.msCollect(ch, ms) | ||||
| } | ||||
|  | ||||
| func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { | ||||
| 	for _, i := range c.msMetrics { | ||||
| 		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // memStatsMetrics provide description, value, and value type for memstat metrics. | ||||
| type memStatsMetrics []struct { | ||||
| 	desc    *Desc | ||||
| 	eval    func(*runtime.MemStats) float64 | ||||
| 	valType ValueType | ||||
| } | ||||
|  | ||||
| // NewBuildInfoCollector returns a collector collecting a single metric | ||||
| // "go_build_info" with the constant value 1 and three labels "path", "version", | ||||
| // and "checksum". Their label values contain the main module path, version, and | ||||
| // checksum, respectively. The labels will only have meaningful values if the | ||||
| // binary is built with Go module support and from source code retrieved from | ||||
| // the source repository (rather than the local file system). This is usually | ||||
| // accomplished by building from outside of GOPATH, specifying the full address | ||||
| // of the main package, e.g. "GO111MODULE=on go run | ||||
| // github.com/prometheus/client_golang/examples/random". If built without Go | ||||
| // module support, all label values will be "unknown". If built with Go module | ||||
| // support but using the source code from the local file system, the "path" will | ||||
| // be set appropriately, but "checksum" will be empty and "version" will be | ||||
| // "(devel)". | ||||
| // | ||||
| // This collector uses only the build information for the main module. See | ||||
| // https://github.com/povilasv/prommod for an example of a collector for the | ||||
| // module dependencies. | ||||
| func NewBuildInfoCollector() Collector { | ||||
| 	path, version, sum := readBuildInfo() | ||||
| 	c := &selfCollector{MustNewConstMetric( | ||||
| 		NewDesc( | ||||
| 			"go_build_info", | ||||
| 			"Build information about the main Go module.", | ||||
| 			nil, Labels{"path": path, "version": version, "checksum": sum}, | ||||
| 		), | ||||
| 		GaugeValue, 1)} | ||||
| 	c.init(c.self) | ||||
| 	return c | ||||
| } | ||||
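A minimal sketch of wiring this collector up with the default registry; registry choice and surrounding setup are assumptions.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Exposes go_build_info{path=..., version=..., checksum=...} 1. The label
	// values are only meaningful for module-aware builds, per the doc above.
	prometheus.MustRegister(prometheus.NewBuildInfoCollector())
}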
							
								
								
									
637 vendor/github.com/prometheus/client_golang/prometheus/histogram.go generated vendored
| @ -1,637 +0,0 @@ | ||||
| // Copyright 2015 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"runtime" | ||||
| 	"sort" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	//lint:ignore SA1019 Need to keep deprecated package for compatibility. | ||||
| 	"github.com/golang/protobuf/proto" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // A Histogram counts individual observations from an event or sample stream in | ||||
| // configurable buckets. Similar to a summary, it also provides a sum of | ||||
| // observations and an observation count. | ||||
| // | ||||
| // On the Prometheus server, quantiles can be calculated from a Histogram using | ||||
| // the histogram_quantile function in the query language. | ||||
| // | ||||
| // Note that Histograms, in contrast to Summaries, can be aggregated with the | ||||
| // Prometheus query language (see the documentation for detailed | ||||
| // procedures). However, Histograms require the user to pre-define suitable | ||||
| // buckets, and they are in general less accurate. The Observe method of a | ||||
| // Histogram has a very low performance overhead in comparison with the Observe | ||||
| // method of a Summary. | ||||
| // | ||||
| // To create Histogram instances, use NewHistogram. | ||||
| type Histogram interface { | ||||
| 	Metric | ||||
| 	Collector | ||||
|  | ||||
| 	// Observe adds a single observation to the histogram. | ||||
| 	Observe(float64) | ||||
| } | ||||
|  | ||||
| // bucketLabel is used for the label that defines the upper bound of a | ||||
| // bucket of a histogram ("le" -> "less or equal"). | ||||
| const bucketLabel = "le" | ||||
|  | ||||
| // DefBuckets are the default Histogram buckets. The default buckets are | ||||
| // tailored to broadly measure the response time (in seconds) of a network | ||||
| // service. Most likely, however, you will be required to define buckets | ||||
| // customized to your use case. | ||||
| var ( | ||||
| 	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} | ||||
|  | ||||
| 	errBucketLabelNotAllowed = fmt.Errorf( | ||||
| 		"%q is not allowed as label name in histograms", bucketLabel, | ||||
| 	) | ||||
| ) | ||||
|  | ||||
| // LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest | ||||
| // bucket has an upper bound of 'start'. The final +Inf bucket is not counted | ||||
| // and not included in the returned slice. The returned slice is meant to be | ||||
| // used for the Buckets field of HistogramOpts. | ||||
| // | ||||
| // The function panics if 'count' is zero or negative. | ||||
| func LinearBuckets(start, width float64, count int) []float64 { | ||||
| 	if count < 1 { | ||||
| 		panic("LinearBuckets needs a positive count") | ||||
| 	} | ||||
| 	buckets := make([]float64, count) | ||||
| 	for i := range buckets { | ||||
| 		buckets[i] = start | ||||
| 		start += width | ||||
| 	} | ||||
| 	return buckets | ||||
| } | ||||
|  | ||||
| // ExponentialBuckets creates 'count' buckets, where the lowest bucket has an | ||||
| // upper bound of 'start' and each following bucket's upper bound is 'factor' | ||||
| // times the previous bucket's upper bound. The final +Inf bucket is not counted | ||||
| // and not included in the returned slice. The returned slice is meant to be | ||||
| // used for the Buckets field of HistogramOpts. | ||||
| // | ||||
| // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, | ||||
| // or if 'factor' is less than or equal to 1. | ||||
| func ExponentialBuckets(start, factor float64, count int) []float64 { | ||||
| 	if count < 1 { | ||||
| 		panic("ExponentialBuckets needs a positive count") | ||||
| 	} | ||||
| 	if start <= 0 { | ||||
| 		panic("ExponentialBuckets needs a positive start value") | ||||
| 	} | ||||
| 	if factor <= 1 { | ||||
| 		panic("ExponentialBuckets needs a factor greater than 1") | ||||
| 	} | ||||
| 	buckets := make([]float64, count) | ||||
| 	for i := range buckets { | ||||
| 		buckets[i] = start | ||||
| 		start *= factor | ||||
| 	} | ||||
| 	return buckets | ||||
| } | ||||
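A small sketch of what the two helpers return; the comments show the computed upper bounds, and the +Inf bucket is always added implicitly.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Five buckets, 2 wide, starting at 1: upper bounds 1, 3, 5, 7, 9.
	fmt.Println(prometheus.LinearBuckets(1, 2, 5))

	// Five buckets growing by a factor of 10 from 0.001:
	// roughly 0.001, 0.01, 0.1, 1, 10 (subject to float64 rounding).
	fmt.Println(prometheus.ExponentialBuckets(0.001, 10, 5))
}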
|  | ||||
| // HistogramOpts bundles the options for creating a Histogram metric. It is | ||||
| // mandatory to set Name to a non-empty string. All other fields are optional | ||||
| // and can safely be left at their zero value, although it is strongly | ||||
| // encouraged to set a Help string. | ||||
| type HistogramOpts struct { | ||||
| 	// Namespace, Subsystem, and Name are components of the fully-qualified | ||||
| 	// name of the Histogram (created by joining these components with | ||||
| 	// "_"). Only Name is mandatory, the others merely help structuring the | ||||
| 	// name. Note that the fully-qualified name of the Histogram must be a | ||||
| 	// valid Prometheus metric name. | ||||
| 	Namespace string | ||||
| 	Subsystem string | ||||
| 	Name      string | ||||
|  | ||||
| 	// Help provides information about this Histogram. | ||||
| 	// | ||||
| 	// Metrics with the same fully-qualified name must have the same Help | ||||
| 	// string. | ||||
| 	Help string | ||||
|  | ||||
| 	// ConstLabels are used to attach fixed labels to this metric. Metrics | ||||
| 	// with the same fully-qualified name must have the same label names in | ||||
| 	// their ConstLabels. | ||||
| 	// | ||||
| 	// ConstLabels are only used rarely. In particular, do not use them to | ||||
| 	// attach the same labels to all your metrics. Those use cases are | ||||
| 	// better covered by target labels set by the scraping Prometheus | ||||
| 	// server, or by one specific metric (e.g. a build_info or a | ||||
| 	// machine_role metric). See also | ||||
| 	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels | ||||
| 	ConstLabels Labels | ||||
|  | ||||
| 	// Buckets defines the buckets into which observations are counted. Each | ||||
| 	// element in the slice is the upper inclusive bound of a bucket. The | ||||
| 	// values must be sorted in strictly increasing order. There is no need | ||||
| 	// to add a highest bucket with +Inf bound, it will be added | ||||
| 	// implicitly. The default value is DefBuckets. | ||||
| 	Buckets []float64 | ||||
| } | ||||
|  | ||||
| // NewHistogram creates a new Histogram based on the provided HistogramOpts. It | ||||
| // panics if the buckets in HistogramOpts are not in strictly increasing order. | ||||
| // | ||||
| // The returned implementation also implements ExemplarObserver. It is safe to | ||||
| // perform the corresponding type assertion. Exemplars are tracked separately | ||||
| // for each bucket. | ||||
| func NewHistogram(opts HistogramOpts) Histogram { | ||||
| 	return newHistogram( | ||||
| 		NewDesc( | ||||
| 			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 			opts.Help, | ||||
| 			nil, | ||||
| 			opts.ConstLabels, | ||||
| 		), | ||||
| 		opts, | ||||
| 	) | ||||
| } | ||||
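A usage sketch with a made-up metric name, buckets, and exemplar labels; the type assertion mirrors the ExemplarObserver note above.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request latency (illustrative).",
		Buckets: []float64{0.01, 0.05, 0.1, 0.5, 1}, // strictly increasing
	})
	prometheus.MustRegister(reqDur)

	start := time.Now()
	// ... handle a request ...
	reqDur.Observe(time.Since(start).Seconds())

	// The returned implementation also satisfies ExemplarObserver.
	if eo, ok := reqDur.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.042, prometheus.Labels{"trace_id": "abc123"})
	}
}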
|  | ||||
| func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { | ||||
| 	if len(desc.variableLabels) != len(labelValues) { | ||||
| 		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) | ||||
| 	} | ||||
|  | ||||
| 	for _, n := range desc.variableLabels { | ||||
| 		if n == bucketLabel { | ||||
| 			panic(errBucketLabelNotAllowed) | ||||
| 		} | ||||
| 	} | ||||
| 	for _, lp := range desc.constLabelPairs { | ||||
| 		if lp.GetName() == bucketLabel { | ||||
| 			panic(errBucketLabelNotAllowed) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(opts.Buckets) == 0 { | ||||
| 		opts.Buckets = DefBuckets | ||||
| 	} | ||||
|  | ||||
| 	h := &histogram{ | ||||
| 		desc:        desc, | ||||
| 		upperBounds: opts.Buckets, | ||||
| 		labelPairs:  MakeLabelPairs(desc, labelValues), | ||||
| 		counts:      [2]*histogramCounts{{}, {}}, | ||||
| 		now:         time.Now, | ||||
| 	} | ||||
| 	for i, upperBound := range h.upperBounds { | ||||
| 		if i < len(h.upperBounds)-1 { | ||||
| 			if upperBound >= h.upperBounds[i+1] { | ||||
| 				panic(fmt.Errorf( | ||||
| 					"histogram buckets must be in increasing order: %f >= %f", | ||||
| 					upperBound, h.upperBounds[i+1], | ||||
| 				)) | ||||
| 			} | ||||
| 		} else { | ||||
| 			if math.IsInf(upperBound, +1) { | ||||
| 				// The +Inf bucket is implicit. Remove it here. | ||||
| 				h.upperBounds = h.upperBounds[:i] | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	// Finally we know the final length of h.upperBounds and can make buckets | ||||
| 	// for both counts as well as exemplars: | ||||
| 	h.counts[0].buckets = make([]uint64, len(h.upperBounds)) | ||||
| 	h.counts[1].buckets = make([]uint64, len(h.upperBounds)) | ||||
| 	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) | ||||
|  | ||||
| 	h.init(h) // Init self-collection. | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| type histogramCounts struct { | ||||
| 	// sumBits contains the bits of the float64 representing the sum of all | ||||
| 	// observations. sumBits and count have to go first in the struct to | ||||
| 	// guarantee alignment for atomic operations. | ||||
| 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG | ||||
| 	sumBits uint64 | ||||
| 	count   uint64 | ||||
| 	buckets []uint64 | ||||
| } | ||||
|  | ||||
| type histogram struct { | ||||
| 	// countAndHotIdx enables lock-free writes with use of atomic updates. | ||||
| 	// The most significant bit is the hot index [0 or 1] of the count field | ||||
| 	// below. Observe calls update the hot one. All remaining bits count the | ||||
| 	// number of Observe calls. Observe starts by incrementing this counter, | ||||
| 	// and finish by incrementing the count field in the respective | ||||
| 	// histogramCounts, as a marker for completion. | ||||
| 	// | ||||
| 	// Calls of the Write method (which are non-mutating reads from the | ||||
| // perspective of the histogram) swap the hot and cold counts under the | ||||
| // writeMtx lock. A cooldown is awaited (while locked) by comparing the | ||||
| // number of observations with the initiation count. Once they match, the | ||||
| // last observation on the now-cold counts has completed. All cold fields must | ||||
| 	// be merged into the new hot before releasing writeMtx. | ||||
| 	// | ||||
| 	// Fields with atomic access first! See alignment constraint: | ||||
| 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG | ||||
| 	countAndHotIdx uint64 | ||||
|  | ||||
| 	selfCollector | ||||
| 	desc     *Desc | ||||
| 	writeMtx sync.Mutex // Only used in the Write method. | ||||
|  | ||||
| 	// Two counts, one is "hot" for lock-free observations, the other is | ||||
| 	// "cold" for writing out a dto.Metric. It has to be an array of | ||||
| 	// pointers to guarantee 64bit alignment of the histogramCounts, see | ||||
| 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG. | ||||
| 	counts [2]*histogramCounts | ||||
|  | ||||
| 	upperBounds []float64 | ||||
| 	labelPairs  []*dto.LabelPair | ||||
| 	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. | ||||
|  | ||||
| 	now func() time.Time // To mock out time.Now() for testing. | ||||
| } | ||||
|  | ||||
| func (h *histogram) Desc() *Desc { | ||||
| 	return h.desc | ||||
| } | ||||
|  | ||||
| func (h *histogram) Observe(v float64) { | ||||
| 	h.observe(v, h.findBucket(v)) | ||||
| } | ||||
|  | ||||
| func (h *histogram) ObserveWithExemplar(v float64, e Labels) { | ||||
| 	i := h.findBucket(v) | ||||
| 	h.observe(v, i) | ||||
| 	h.updateExemplar(v, i, e) | ||||
| } | ||||
|  | ||||
| func (h *histogram) Write(out *dto.Metric) error { | ||||
| 	// For simplicity, we protect this whole method by a mutex. It is not in | ||||
| 	// the hot path, i.e. Observe is called much more often than Write. The | ||||
| 	// complication of making Write lock-free isn't worth it, if possible at | ||||
| 	// all. | ||||
| 	h.writeMtx.Lock() | ||||
| 	defer h.writeMtx.Unlock() | ||||
|  | ||||
| 	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) | ||||
| 	// without touching the count bits. See the struct comments for a full | ||||
| 	// description of the algorithm. | ||||
| 	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) | ||||
| 	// count is contained unchanged in the lower 63 bits. | ||||
| 	count := n & ((1 << 63) - 1) | ||||
| 	// The most significant bit tells us which counts is hot. The complement | ||||
| 	// is thus the cold one. | ||||
| 	hotCounts := h.counts[n>>63] | ||||
| 	coldCounts := h.counts[(^n)>>63] | ||||
|  | ||||
| 	// Await cooldown. | ||||
| 	for count != atomic.LoadUint64(&coldCounts.count) { | ||||
| 		runtime.Gosched() // Let observations get work done. | ||||
| 	} | ||||
|  | ||||
| 	his := &dto.Histogram{ | ||||
| 		Bucket:      make([]*dto.Bucket, len(h.upperBounds)), | ||||
| 		SampleCount: proto.Uint64(count), | ||||
| 		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), | ||||
| 	} | ||||
| 	var cumCount uint64 | ||||
| 	for i, upperBound := range h.upperBounds { | ||||
| 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) | ||||
| 		his.Bucket[i] = &dto.Bucket{ | ||||
| 			CumulativeCount: proto.Uint64(cumCount), | ||||
| 			UpperBound:      proto.Float64(upperBound), | ||||
| 		} | ||||
| 		if e := h.exemplars[i].Load(); e != nil { | ||||
| 			his.Bucket[i].Exemplar = e.(*dto.Exemplar) | ||||
| 		} | ||||
| 	} | ||||
| 	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. | ||||
| 	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { | ||||
| 		b := &dto.Bucket{ | ||||
| 			CumulativeCount: proto.Uint64(count), | ||||
| 			UpperBound:      proto.Float64(math.Inf(1)), | ||||
| 			Exemplar:        e.(*dto.Exemplar), | ||||
| 		} | ||||
| 		his.Bucket = append(his.Bucket, b) | ||||
| 	} | ||||
|  | ||||
| 	out.Histogram = his | ||||
| 	out.Label = h.labelPairs | ||||
|  | ||||
| 	// Finally add all the cold counts to the new hot counts and reset the cold counts. | ||||
| 	atomic.AddUint64(&hotCounts.count, count) | ||||
| 	atomic.StoreUint64(&coldCounts.count, 0) | ||||
| 	for { | ||||
| 		oldBits := atomic.LoadUint64(&hotCounts.sumBits) | ||||
| 		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) | ||||
| 		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { | ||||
| 			atomic.StoreUint64(&coldCounts.sumBits, 0) | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	for i := range h.upperBounds { | ||||
| 		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) | ||||
| 		atomic.StoreUint64(&coldCounts.buckets[i], 0) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // findBucket returns the index of the bucket for the provided value, or | ||||
| // len(h.upperBounds) for the +Inf bucket. | ||||
| func (h *histogram) findBucket(v float64) int { | ||||
| 	// TODO(beorn7): For small numbers of buckets (<30), a linear search is | ||||
| 	// slightly faster than the binary search. If we really care, we could | ||||
| 	// switch from one search strategy to the other depending on the number | ||||
| 	// of buckets. | ||||
| 	// | ||||
| 	// Microbenchmarks (BenchmarkHistogramNoLabels): | ||||
| 	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op | ||||
| 	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op | ||||
| 	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op | ||||
| 	return sort.SearchFloat64s(h.upperBounds, v) | ||||
| } | ||||
|  | ||||
| // observe is the implementation for Observe without the findBucket part. | ||||
| func (h *histogram) observe(v float64, bucket int) { | ||||
| 	// We increment h.countAndHotIdx so that the counter in the lower | ||||
| 	// 63 bits gets incremented. At the same time, we get the new value | ||||
| 	// back, which we can use to find the currently-hot counts. | ||||
| 	n := atomic.AddUint64(&h.countAndHotIdx, 1) | ||||
| 	hotCounts := h.counts[n>>63] | ||||
|  | ||||
| 	if bucket < len(h.upperBounds) { | ||||
| 		atomic.AddUint64(&hotCounts.buckets[bucket], 1) | ||||
| 	} | ||||
| 	for { | ||||
| 		oldBits := atomic.LoadUint64(&hotCounts.sumBits) | ||||
| 		newBits := math.Float64bits(math.Float64frombits(oldBits) + v) | ||||
| 		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	// Increment count last as we take it as a signal that the observation | ||||
| 	// is complete. | ||||
| 	atomic.AddUint64(&hotCounts.count, 1) | ||||
| } | ||||
|  | ||||
| // updateExemplar replaces the exemplar for the provided bucket. With empty | ||||
| // labels, it's a no-op. It panics if any of the labels is invalid. | ||||
| func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { | ||||
| 	if l == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	e, err := newExemplar(v, h.now(), l) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	h.exemplars[bucket].Store(e) | ||||
| } | ||||
|  | ||||
| // HistogramVec is a Collector that bundles a set of Histograms that all share the | ||||
| // same Desc, but have different values for their variable labels. This is used | ||||
| // if you want to count the same thing partitioned by various dimensions | ||||
| // (e.g. HTTP request latencies, partitioned by status code and method). Create | ||||
| // instances with NewHistogramVec. | ||||
| type HistogramVec struct { | ||||
| 	*MetricVec | ||||
| } | ||||
|  | ||||
| // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and | ||||
| // partitioned by the given label names. | ||||
| func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { | ||||
| 	desc := NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		labelNames, | ||||
| 		opts.ConstLabels, | ||||
| 	) | ||||
| 	return &HistogramVec{ | ||||
| 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { | ||||
| 			return newHistogram(desc, opts, lvs...) | ||||
| 		}), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // GetMetricWithLabelValues returns the Histogram for the given slice of label | ||||
| // values (same order as the variable labels in Desc). If that combination of | ||||
| // label values is accessed for the first time, a new Histogram is created. | ||||
| // | ||||
| // It is possible to call this method without using the returned Histogram to only | ||||
| // create the new Histogram but leave it at its starting value, a Histogram without | ||||
| // any observations. | ||||
| // | ||||
| // Keeping the Histogram for later use is possible (and should be considered if | ||||
| // performance is critical), but keep in mind that Reset, DeleteLabelValues and | ||||
| // Delete can be used to delete the Histogram from the HistogramVec. In that case, the | ||||
| // Histogram will still exist, but it will not be exported anymore, even if a | ||||
| // Histogram with the same label values is created later. See also the CounterVec | ||||
| // example. | ||||
| // | ||||
| // An error is returned if the number of label values is not the same as the | ||||
| // number of variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // Note that for more than one label value, this method is prone to mistakes | ||||
| // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | ||||
| // an alternative to avoid that type of mistake. For higher label numbers, the | ||||
| // latter has a much more readable (albeit more verbose) syntax, but it comes | ||||
| // with a performance overhead (for creating and processing the Labels map). | ||||
| // See also the GaugeVec example. | ||||
| func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Observer), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // GetMetricWith returns the Histogram for the given Labels map (the label names | ||||
| // must match those of the variable labels in Desc). If that label map is | ||||
| // accessed for the first time, a new Histogram is created. Implications of | ||||
| // creating a Histogram without using it and keeping the Histogram for later use | ||||
| // are the same as for GetMetricWithLabelValues. | ||||
| // | ||||
| // An error is returned if the number and names of the Labels are inconsistent | ||||
| // with those of the variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // This method is used for the same purpose as | ||||
| // GetMetricWithLabelValues(...string). See there for pros and cons of the two | ||||
| // methods. | ||||
| func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWith(labels) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Observer), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // WithLabelValues works as GetMetricWithLabelValues, but panics where | ||||
| // GetMetricWithLabelValues would have returned an error. Not returning an | ||||
| // error allows shortcuts like | ||||
| //     myVec.WithLabelValues("404", "GET").Observe(42.21) | ||||
| func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { | ||||
| 	h, err := v.GetMetricWithLabelValues(lvs...) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| // With works as GetMetricWith but panics where GetMetricWith would have | ||||
| // returned an error. Not returning an error allows shortcuts like | ||||
| //     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) | ||||
| func (v *HistogramVec) With(labels Labels) Observer { | ||||
| 	h, err := v.GetMetricWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
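A sketch combining the panicking and map-based accessors above; handler and method values are assumptions.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Latency by handler and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"handler", "method"})
	prometheus.MustRegister(latency)

	// Panicking shortcut; value order must match the label names above.
	latency.WithLabelValues("/api", "GET").Observe(0.042)

	// Map form: slower, but immune to argument-order mistakes.
	latency.With(prometheus.Labels{"handler": "/api", "method": "POST"}).Observe(0.21)
}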
|  | ||||
| // CurryWith returns a vector curried with the provided labels, i.e. the | ||||
| // returned vector has those labels pre-set for all labeled operations performed | ||||
| // on it. The cardinality of the curried vector is reduced accordingly. The | ||||
| // order of the remaining labels stays the same (just with the curried labels | ||||
| // taken out of the sequence – which is relevant for the | ||||
| // (GetMetric)WithLabelValues methods). It is possible to curry a curried | ||||
| // vector, but only with labels not yet used for currying before. | ||||
| // | ||||
| // The metrics contained in the HistogramVec are shared between the curried and | ||||
| // uncurried vectors. They are just accessed differently. Curried and uncurried | ||||
| // vectors behave identically in terms of collection. Only one must be | ||||
| // registered with a given registry (usually the uncurried version). The Reset | ||||
| // method deletes all metrics, even if called on a curried vector. | ||||
| func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { | ||||
| 	vec, err := v.MetricVec.CurryWith(labels) | ||||
| 	if vec != nil { | ||||
| 		return &HistogramVec{vec}, err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // MustCurryWith works as CurryWith but panics where CurryWith would have | ||||
| // returned an error. | ||||
| func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { | ||||
| 	vec, err := v.CurryWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return vec | ||||
| } | ||||
|  | ||||
| type constHistogram struct { | ||||
| 	desc       *Desc | ||||
| 	count      uint64 | ||||
| 	sum        float64 | ||||
| 	buckets    map[float64]uint64 | ||||
| 	labelPairs []*dto.LabelPair | ||||
| } | ||||
|  | ||||
| func (h *constHistogram) Desc() *Desc { | ||||
| 	return h.desc | ||||
| } | ||||
|  | ||||
| func (h *constHistogram) Write(out *dto.Metric) error { | ||||
| 	his := &dto.Histogram{} | ||||
| 	buckets := make([]*dto.Bucket, 0, len(h.buckets)) | ||||
|  | ||||
| 	his.SampleCount = proto.Uint64(h.count) | ||||
| 	his.SampleSum = proto.Float64(h.sum) | ||||
|  | ||||
| 	for upperBound, count := range h.buckets { | ||||
| 		buckets = append(buckets, &dto.Bucket{ | ||||
| 			CumulativeCount: proto.Uint64(count), | ||||
| 			UpperBound:      proto.Float64(upperBound), | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	if len(buckets) > 0 { | ||||
| 		sort.Sort(buckSort(buckets)) | ||||
| 	} | ||||
| 	his.Bucket = buckets | ||||
|  | ||||
| 	out.Histogram = his | ||||
| 	out.Label = h.labelPairs | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // NewConstHistogram returns a metric representing a Prometheus histogram with | ||||
| // fixed values for the count, sum, and bucket counts. As those parameters | ||||
| // cannot be changed, the returned value does not implement the Histogram | ||||
| // interface (but only the Metric interface). Users of this package will not | ||||
| // have much use for it in regular operations. However, when implementing custom | ||||
| // Collectors, it is useful as a throw-away metric that is generated on the fly | ||||
| // and sent to Prometheus in the Collect method. | ||||
| // | ||||
| // buckets is a map of upper bounds to cumulative counts, excluding the +Inf | ||||
| // bucket. | ||||
| // | ||||
| // NewConstHistogram returns an error if the length of labelValues is not | ||||
| // consistent with the variable labels in Desc or if Desc is invalid. | ||||
| func NewConstHistogram( | ||||
| 	desc *Desc, | ||||
| 	count uint64, | ||||
| 	sum float64, | ||||
| 	buckets map[float64]uint64, | ||||
| 	labelValues ...string, | ||||
| ) (Metric, error) { | ||||
| 	if desc.err != nil { | ||||
| 		return nil, desc.err | ||||
| 	} | ||||
| 	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &constHistogram{ | ||||
| 		desc:       desc, | ||||
| 		count:      count, | ||||
| 		sum:        sum, | ||||
| 		buckets:    buckets, | ||||
| 		labelPairs: MakeLabelPairs(desc, labelValues), | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // MustNewConstHistogram is a version of NewConstHistogram that panics where | ||||
| // NewConstHistogram would have returned an error. | ||||
| func MustNewConstHistogram( | ||||
| 	desc *Desc, | ||||
| 	count uint64, | ||||
| 	sum float64, | ||||
| 	buckets map[float64]uint64, | ||||
| 	labelValues ...string, | ||||
| ) Metric { | ||||
| 	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return m | ||||
| } | ||||
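Since the comment above points at custom Collectors as the main consumer, here is a small, hedged sketch of that pattern; the collector, metric name, and numbers are fabricated for the example:

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector is a hypothetical custom Collector that converts statistics
// read from some external system into a throw-away histogram on each scrape.
type queueCollector struct {
	desc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		desc: prometheus.NewDesc(
			"queue_wait_seconds",
			"Time jobs spent waiting in the queue.",
			[]string{"queue"}, nil,
		),
	}
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// Pretend these numbers came from the external system. The bucket map uses
	// upper bounds as keys and cumulative counts as values; +Inf is implied.
	buckets := map[float64]uint64{0.1: 120, 0.5: 310, 1: 340}
	ch <- prometheus.MustNewConstHistogram(c.desc, 345, 178.3, buckets, "default")
}

func main() {
	prometheus.MustRegister(newQueueCollector())
}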
|  | ||||
| type buckSort []*dto.Bucket | ||||
|  | ||||
| func (s buckSort) Len() int { | ||||
| 	return len(s) | ||||
| } | ||||
|  | ||||
| func (s buckSort) Swap(i, j int) { | ||||
| 	s[i], s[j] = s[j], s[i] | ||||
| } | ||||
|  | ||||
| func (s buckSort) Less(i, j int) bool { | ||||
| 	return s[i].GetUpperBound() < s[j].GetUpperBound() | ||||
| } | ||||
							
								
								
									
85 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go (generated, vendored)
							| @ -1,85 +0,0 @@ | ||||
| // Copyright 2018 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"sort" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // metricSorter is a sortable slice of *dto.Metric. | ||||
| type metricSorter []*dto.Metric | ||||
|  | ||||
| func (s metricSorter) Len() int { | ||||
| 	return len(s) | ||||
| } | ||||
|  | ||||
| func (s metricSorter) Swap(i, j int) { | ||||
| 	s[i], s[j] = s[j], s[i] | ||||
| } | ||||
|  | ||||
| func (s metricSorter) Less(i, j int) bool { | ||||
| 	if len(s[i].Label) != len(s[j].Label) { | ||||
| 		// This should not happen. The metrics are | ||||
| 		// inconsistent. However, we have to deal with the fact, as | ||||
| 		// people might use custom collectors or metric family injection | ||||
| 		// to create inconsistent metrics. So let's simply compare the | ||||
| 		// number of labels in this case. That will still yield | ||||
| 		// reproducible sorting. | ||||
| 		return len(s[i].Label) < len(s[j].Label) | ||||
| 	} | ||||
| 	for n, lp := range s[i].Label { | ||||
| 		vi := lp.GetValue() | ||||
| 		vj := s[j].Label[n].GetValue() | ||||
| 		if vi != vj { | ||||
| 			return vi < vj | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// We should never arrive here. Multiple metrics with the same | ||||
| 	// label set in the same scrape will lead to undefined ingestion | ||||
| 	// behavior. However, as above, we have to provide stable sorting | ||||
| 	// here, even for inconsistent metrics. So sort equal metrics | ||||
| 	// by their timestamp, with missing timestamps (implying "now") | ||||
| 	// coming last. | ||||
| 	if s[i].TimestampMs == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	if s[j].TimestampMs == nil { | ||||
| 		return true | ||||
| 	} | ||||
| 	return s[i].GetTimestampMs() < s[j].GetTimestampMs() | ||||
| } | ||||
|  | ||||
| // NormalizeMetricFamilies returns a MetricFamily slice with empty | ||||
| // MetricFamilies pruned and the remaining MetricFamilies sorted by name within | ||||
| // the slice, with the contained Metrics sorted within each MetricFamily. | ||||
| func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { | ||||
| 	for _, mf := range metricFamiliesByName { | ||||
| 		sort.Sort(metricSorter(mf.Metric)) | ||||
| 	} | ||||
| 	names := make([]string, 0, len(metricFamiliesByName)) | ||||
| 	for name, mf := range metricFamiliesByName { | ||||
| 		if len(mf.Metric) > 0 { | ||||
| 			names = append(names, name) | ||||
| 		} | ||||
| 	} | ||||
| 	sort.Strings(names) | ||||
| 	result := make([]*dto.MetricFamily, 0, len(names)) | ||||
| 	for _, name := range names { | ||||
| 		result = append(result, metricFamiliesByName[name]) | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
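Because this lives in an internal package, it cannot be imported from outside client_golang; the sketch below is only illustrative of the pruning and sorting behaviour and would have to sit inside the internal package itself. All names and values are made up:

package internal

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func demoNormalize() {
	byName := map[string]*dto.MetricFamily{
		"b_total": {
			Name:   proto.String("b_total"),
			Metric: []*dto.Metric{{}, {}},
		},
		"a_total": {
			Name: proto.String("a_total"), // no metrics: pruned from the result
		},
	}
	for _, mf := range NormalizeMetricFamilies(byName) {
		fmt.Println(mf.GetName()) // prints only "b_total"
	}
}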
							
								
								
									
87 vendor/github.com/prometheus/client_golang/prometheus/labels.go (generated, vendored)
							| @ -1,87 +0,0 @@ | ||||
| // Copyright 2018 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"github.com/prometheus/common/model" | ||||
| ) | ||||
|  | ||||
| // Labels represents a collection of label name -> value mappings. This type is | ||||
| // commonly used with the With(Labels) and GetMetricWith(Labels) methods of | ||||
| // metric vector Collectors, e.g.: | ||||
| //     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) | ||||
| // | ||||
| // The other use-case is the specification of constant label pairs in Opts or to | ||||
| // create a Desc. | ||||
| type Labels map[string]string | ||||
|  | ||||
| // reservedLabelPrefix is a prefix which is not legal in user-supplied | ||||
| // label names. | ||||
| const reservedLabelPrefix = "__" | ||||
|  | ||||
| var errInconsistentCardinality = errors.New("inconsistent label cardinality") | ||||
|  | ||||
| func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { | ||||
| 	return fmt.Errorf( | ||||
| 		"%s: %q has %d variable labels named %q but %d values %q were provided", | ||||
| 		errInconsistentCardinality, fqName, | ||||
| 		len(labels), labels, | ||||
| 		len(labelValues), labelValues, | ||||
| 	) | ||||
| } | ||||
|  | ||||
| func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { | ||||
| 	if len(labels) != expectedNumberOfValues { | ||||
| 		return fmt.Errorf( | ||||
| 			"%s: expected %d label values but got %d in %#v", | ||||
| 			errInconsistentCardinality, expectedNumberOfValues, | ||||
| 			len(labels), labels, | ||||
| 		) | ||||
| 	} | ||||
|  | ||||
| 	for name, val := range labels { | ||||
| 		if !utf8.ValidString(val) { | ||||
| 			return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func validateLabelValues(vals []string, expectedNumberOfValues int) error { | ||||
| 	if len(vals) != expectedNumberOfValues { | ||||
| 		return fmt.Errorf( | ||||
| 			"%s: expected %d label values but got %d in %#v", | ||||
| 			errInconsistentCardinality, expectedNumberOfValues, | ||||
| 			len(vals), vals, | ||||
| 		) | ||||
| 	} | ||||
|  | ||||
| 	for _, val := range vals { | ||||
| 		if !utf8.ValidString(val) { | ||||
| 			return fmt.Errorf("label value %q is not valid UTF-8", val) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func checkLabelName(l string) bool { | ||||
| 	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) | ||||
| } | ||||
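To make the cardinality rules enforced by these helpers concrete, a short sketch using the public vector API; the counter and its labels are invented for the example:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_requests_total", Help: "Demo counter."},
		[]string{"code", "method"},
	)

	// Matching cardinality: both access styles succeed.
	requests.With(prometheus.Labels{"code": "200", "method": "GET"}).Inc()
	requests.WithLabelValues("200", "GET").Inc()

	// Mismatched cardinality: the non-panicking getters are expected to return
	// the "inconsistent label cardinality" error constructed above.
	if _, err := requests.GetMetricWithLabelValues("200"); err != nil {
		fmt.Println(err)
	}
	if _, err := requests.GetMetricWith(prometheus.Labels{"code": "200"}); err != nil {
		fmt.Println(err)
	}
}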
							
								
								
									
176 vendor/github.com/prometheus/client_golang/prometheus/metric.go (generated, vendored)
							| @ -1,176 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	//lint:ignore SA1019 Need to keep deprecated package for compatibility. | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/prometheus/common/model" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. | ||||
|  | ||||
| // A Metric models a single sample value with its meta data being exported to | ||||
| // Prometheus. Implementations of Metric in this package are Gauge, Counter, | ||||
| // Histogram, Summary, and Untyped. | ||||
| type Metric interface { | ||||
| 	// Desc returns the descriptor for the Metric. This method idempotently | ||||
| 	// returns the same descriptor throughout the lifetime of the | ||||
| 	// Metric. The returned descriptor is immutable by contract. A Metric | ||||
| 	// unable to describe itself must return an invalid descriptor (created | ||||
| 	// with NewInvalidDesc). | ||||
| 	Desc() *Desc | ||||
| 	// Write encodes the Metric into a "Metric" Protocol Buffer data | ||||
| 	// transmission object. | ||||
| 	// | ||||
| 	// Metric implementations must observe concurrency safety as reads of | ||||
| 	// this metric may occur at any time, and any blocking occurs at the | ||||
| 	// expense of total performance of rendering all registered | ||||
| 	// metrics. Ideally, Metric implementations should support concurrent | ||||
| 	// readers. | ||||
| 	// | ||||
| 	// While populating dto.Metric, it is the responsibility of the | ||||
| 	// implementation to ensure validity of the Metric protobuf (like valid | ||||
| 	// UTF-8 strings or syntactically valid metric and label names). It is | ||||
| 	// recommended to sort labels lexicographically. Callers of Write should | ||||
| // still ensure sorting themselves if they depend on it. | ||||
| 	Write(*dto.Metric) error | ||||
| 	// TODO(beorn7): The original rationale of passing in a pre-allocated | ||||
| 	// dto.Metric protobuf to save allocations has disappeared. The | ||||
| 	// signature of this method should be changed to "Write() (*dto.Metric, | ||||
| 	// error)". | ||||
| } | ||||
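In most cases NewConstMetric (or the regular Counter/Gauge/Histogram/Summary types) is the right way to obtain a Metric, but as a hedged sketch of the Desc/Write contract itself, a hand-rolled implementation could look roughly like this; the type and value are invented:

package main

import (
	"github.com/golang/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// staticGauge is a hypothetical, immutable Metric used only to illustrate the
// interface; real code would normally use prometheus.NewConstMetric instead.
type staticGauge struct {
	desc  *prometheus.Desc
	value float64
}

func (g staticGauge) Desc() *prometheus.Desc { return g.desc }

// Write fills the dto.Metric transmission object. It never blocks and is safe
// for concurrent readers because the value is immutable.
func (g staticGauge) Write(out *dto.Metric) error {
	out.Gauge = &dto.Gauge{Value: proto.Float64(g.value)}
	return nil
}

var _ prometheus.Metric = staticGauge{}

func main() {}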
|  | ||||
| // Opts bundles the options for creating most Metric types. Each metric | ||||
| // implementation XXX has its own XXXOpts type, but in most cases, it is just | ||||
| // an alias of this type (which might change when the requirement arises). | ||||
| // | ||||
| // It is mandatory to set Name to a non-empty string. All other fields are | ||||
| // optional and can safely be left at their zero value, although it is strongly | ||||
| // encouraged to set a Help string. | ||||
| type Opts struct { | ||||
| 	// Namespace, Subsystem, and Name are components of the fully-qualified | ||||
| 	// name of the Metric (created by joining these components with | ||||
| 	// "_"). Only Name is mandatory, the others merely help structuring the | ||||
| 	// name. Note that the fully-qualified name of the metric must be a | ||||
| 	// valid Prometheus metric name. | ||||
| 	Namespace string | ||||
| 	Subsystem string | ||||
| 	Name      string | ||||
|  | ||||
| 	// Help provides information about this metric. | ||||
| 	// | ||||
| 	// Metrics with the same fully-qualified name must have the same Help | ||||
| 	// string. | ||||
| 	Help string | ||||
|  | ||||
| 	// ConstLabels are used to attach fixed labels to this metric. Metrics | ||||
| 	// with the same fully-qualified name must have the same label names in | ||||
| 	// their ConstLabels. | ||||
| 	// | ||||
| 	// ConstLabels are only used rarely. In particular, do not use them to | ||||
| 	// attach the same labels to all your metrics. Those use cases are | ||||
| 	// better covered by target labels set by the scraping Prometheus | ||||
| 	// server, or by one specific metric (e.g. a build_info or a | ||||
| 	// machine_role metric). See also | ||||
| 	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels | ||||
| 	ConstLabels Labels | ||||
| } | ||||
|  | ||||
| // BuildFQName joins the given three name components by "_". Empty name | ||||
| // components are ignored. If the name parameter itself is empty, an empty | ||||
| // string is returned, no matter what. Metric implementations included in this | ||||
| // library use this function internally to generate the fully-qualified metric | ||||
| // name from the name component in their Opts. Users of the library will only | ||||
| // need this function if they implement their own Metric or instantiate a Desc | ||||
| // (with NewDesc) directly. | ||||
| func BuildFQName(namespace, subsystem, name string) string { | ||||
| 	if name == "" { | ||||
| 		return "" | ||||
| 	} | ||||
| 	switch { | ||||
| 	case namespace != "" && subsystem != "": | ||||
| 		return strings.Join([]string{namespace, subsystem, name}, "_") | ||||
| 	case namespace != "": | ||||
| 		return strings.Join([]string{namespace, name}, "_") | ||||
| 	case subsystem != "": | ||||
| 		return strings.Join([]string{subsystem, name}, "_") | ||||
| 	} | ||||
| 	return name | ||||
| } | ||||
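A quick sketch of how the three components are joined; the names are placeholders:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Empty components are skipped; an empty name always yields "".
	fmt.Println(prometheus.BuildFQName("myapp", "api", "requests_total")) // myapp_api_requests_total
	fmt.Println(prometheus.BuildFQName("myapp", "", "requests_total"))    // myapp_requests_total
	fmt.Println(prometheus.BuildFQName("", "", "requests_total"))         // requests_total
	fmt.Println(prometheus.BuildFQName("myapp", "api", "") == "")         // true
}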
|  | ||||
| // labelPairSorter implements sort.Interface. It is used to sort a slice of | ||||
| // dto.LabelPair pointers. | ||||
| type labelPairSorter []*dto.LabelPair | ||||
|  | ||||
| func (s labelPairSorter) Len() int { | ||||
| 	return len(s) | ||||
| } | ||||
|  | ||||
| func (s labelPairSorter) Swap(i, j int) { | ||||
| 	s[i], s[j] = s[j], s[i] | ||||
| } | ||||
|  | ||||
| func (s labelPairSorter) Less(i, j int) bool { | ||||
| 	return s[i].GetName() < s[j].GetName() | ||||
| } | ||||
|  | ||||
| type invalidMetric struct { | ||||
| 	desc *Desc | ||||
| 	err  error | ||||
| } | ||||
|  | ||||
| // NewInvalidMetric returns a metric whose Write method always returns the | ||||
| // provided error. It is useful if a Collector finds itself unable to collect | ||||
| // a metric and wishes to report an error to the registry. | ||||
| func NewInvalidMetric(desc *Desc, err error) Metric { | ||||
| 	return &invalidMetric{desc, err} | ||||
| } | ||||
|  | ||||
| func (m *invalidMetric) Desc() *Desc { return m.desc } | ||||
|  | ||||
| func (m *invalidMetric) Write(*dto.Metric) error { return m.err } | ||||
|  | ||||
| type timestampedMetric struct { | ||||
| 	Metric | ||||
| 	t time.Time | ||||
| } | ||||
|  | ||||
| func (m timestampedMetric) Write(pb *dto.Metric) error { | ||||
| 	e := m.Metric.Write(pb) | ||||
| 	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) | ||||
| 	return e | ||||
| } | ||||
|  | ||||
| // NewMetricWithTimestamp returns a new Metric wrapping the provided Metric so | ||||
| // that it has an explicit timestamp set to the provided Time. This is only | ||||
| // useful in rare cases as the timestamp of a Prometheus metric should usually | ||||
| // be set by the Prometheus server during scraping. Exceptions include mirroring | ||||
| // metrics with given timestamps from other metric | ||||
| // sources. | ||||
| // | ||||
| // NewMetricWithTimestamp works best with MustNewConstMetric, | ||||
| // MustNewConstHistogram, and MustNewConstSummary, see example. | ||||
| // | ||||
| // Currently, the exposition formats used by Prometheus are limited to | ||||
| // millisecond resolution. Thus, the provided time will be rounded down to the | ||||
| // next full millisecond value. | ||||
| func NewMetricWithTimestamp(t time.Time, m Metric) Metric { | ||||
| 	return timestampedMetric{Metric: m, t: t} | ||||
| } | ||||
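A hedged sketch of the mirroring use case named above, combined with MustNewConstMetric as recommended; the collector, metric name, and timestamp source are assumptions:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// mirrorCollector is a hypothetical Collector that re-exposes a sample obtained
// from some other monitoring source together with that sample's own timestamp.
type mirrorCollector struct {
	desc *prometheus.Desc
}

func (c *mirrorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	// Value and timestamp would normally come from the mirrored source.
	sampledAt := time.Now().Add(-30 * time.Second)
	m := prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
	ch <- prometheus.NewMetricWithTimestamp(sampledAt, m)
}

func main() {
	prometheus.MustRegister(&mirrorCollector{
		desc: prometheus.NewDesc("mirrored_value", "A value mirrored from elsewhere.", nil, nil),
	})
}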
							
								
								
									
64 vendor/github.com/prometheus/client_golang/prometheus/observer.go (generated, vendored)
							| @ -1,64 +0,0 @@ | ||||
| // Copyright 2017 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| // Observer is the interface that wraps the Observe method, which is used by | ||||
| // Histogram and Summary to add observations. | ||||
| type Observer interface { | ||||
| 	Observe(float64) | ||||
| } | ||||
|  | ||||
| // The ObserverFunc type is an adapter to allow the use of ordinary | ||||
| // functions as Observers. If f is a function with the appropriate | ||||
| // signature, ObserverFunc(f) is an Observer that calls f. | ||||
| // | ||||
| // This adapter is usually used in connection with the Timer type, and there are | ||||
| // two general use cases: | ||||
| // | ||||
| // The most common one is to use a Gauge as the Observer for a Timer. | ||||
| // See the "Gauge" Timer example. | ||||
| // | ||||
| // The more advanced use case is to create a function that dynamically decides | ||||
| // which Observer to use for observing the duration. See the "Complex" Timer | ||||
| // example. | ||||
| type ObserverFunc func(float64) | ||||
|  | ||||
| // Observe calls f(value). It implements Observer. | ||||
| func (f ObserverFunc) Observe(value float64) { | ||||
| 	f(value) | ||||
| } | ||||
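The "Gauge" Timer use case mentioned above, as a minimal sketch; the gauge name and the simulated work are made up:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_last_run_duration_seconds",
		Help: "Duration of the last batch run.",
	})

	// A Gauge is not an Observer, but ObserverFunc adapts its Set method so a
	// Timer can write the measured duration into it.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRun.Set))
	defer timer.ObserveDuration()

	time.Sleep(10 * time.Millisecond) // stand-in for the actual work
}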
|  | ||||
| // ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. | ||||
| type ObserverVec interface { | ||||
| 	GetMetricWith(Labels) (Observer, error) | ||||
| 	GetMetricWithLabelValues(lvs ...string) (Observer, error) | ||||
| 	With(Labels) Observer | ||||
| 	WithLabelValues(...string) Observer | ||||
| 	CurryWith(Labels) (ObserverVec, error) | ||||
| 	MustCurryWith(Labels) ObserverVec | ||||
|  | ||||
| 	Collector | ||||
| } | ||||
|  | ||||
| // ExemplarObserver is implemented by Observers that offer the option of | ||||
| // observing a value together with an exemplar. Its ObserveWithExemplar method | ||||
| // works like the Observe method of an Observer but also replaces the currently | ||||
| // saved exemplar (if any) with a new one, created from the provided value, the | ||||
| // current time as timestamp, and the provided Labels. Empty Labels will lead to | ||||
| // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is | ||||
| // left in place. ObserveWithExemplar panics if any of the provided labels are | ||||
| // invalid or if the provided labels contain more than 64 runes in total. | ||||
| type ExemplarObserver interface { | ||||
| 	ObserveWithExemplar(value float64, exemplar Labels) | ||||
| } | ||||
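A hedged sketch of attaching an exemplar; the histogram and the trace_id label are assumptions, and the type assertion reflects that ExemplarObserver is an optional capability rather than part of the Observer interface:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_latency_seconds",
		Help:    "Request latency.",
		Buckets: prometheus.DefBuckets,
	})

	// Histograms in this package also implement ExemplarObserver; the assertion
	// keeps the code working even against implementations that do not.
	if eo, ok := latency.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.123, prometheus.Labels{"trace_id": "abc123"})
	}
}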
							
								
								
									
172 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (generated, vendored)
							| @ -1,172 +0,0 @@ | ||||
| // Copyright 2015 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type processCollector struct { | ||||
| 	collectFn       func(chan<- Metric) | ||||
| 	pidFn           func() (int, error) | ||||
| 	reportErrors    bool | ||||
| 	cpuTotal        *Desc | ||||
| 	openFDs, maxFDs *Desc | ||||
| 	vsize, maxVsize *Desc | ||||
| 	rss             *Desc | ||||
| 	startTime       *Desc | ||||
| } | ||||
|  | ||||
| // ProcessCollectorOpts defines the behavior of a process metrics collector | ||||
| // created with NewProcessCollector. | ||||
| type ProcessCollectorOpts struct { | ||||
| 	// PidFn returns the PID of the process the collector collects metrics | ||||
| 	// for. It is called upon each collection. By default, the PID of the | ||||
| 	// current process is used, as determined on construction time by | ||||
| 	// calling os.Getpid(). | ||||
| 	PidFn func() (int, error) | ||||
| 	// If non-empty, each of the collected metrics is prefixed by the | ||||
| 	// provided string and an underscore ("_"). | ||||
| 	Namespace string | ||||
| 	// If true, any error encountered during collection is reported as an | ||||
| 	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored | ||||
| 	// and the collected metrics will be incomplete. (Possibly, no metrics | ||||
| 	// will be collected at all.) While that's usually not desired, it is | ||||
| 	// appropriate for the common "mix-in" of process metrics, where process | ||||
| 	// metrics are nice to have, but failing to collect them should not | ||||
| 	// disrupt the collection of the remaining metrics. | ||||
| 	ReportErrors bool | ||||
| } | ||||
|  | ||||
| // NewProcessCollector returns a collector which exports the current state of | ||||
| // process metrics including CPU, memory and file descriptor usage as well as | ||||
| // the process start time. The detailed behavior is defined by the provided | ||||
| // ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a | ||||
| // collector for the current process with an empty namespace string and no error | ||||
| // reporting. | ||||
| // | ||||
| // The collector only works on operating systems with a Linux-style proc | ||||
| // filesystem and on Microsoft Windows. On other operating systems, it will not | ||||
| // collect any metrics. | ||||
| func NewProcessCollector(opts ProcessCollectorOpts) Collector { | ||||
| 	ns := "" | ||||
| 	if len(opts.Namespace) > 0 { | ||||
| 		ns = opts.Namespace + "_" | ||||
| 	} | ||||
|  | ||||
| 	c := &processCollector{ | ||||
| 		reportErrors: opts.ReportErrors, | ||||
| 		cpuTotal: NewDesc( | ||||
| 			ns+"process_cpu_seconds_total", | ||||
| 			"Total user and system CPU time spent in seconds.", | ||||
| 			nil, nil, | ||||
| 		), | ||||
| 		openFDs: NewDesc( | ||||
| 			ns+"process_open_fds", | ||||
| 			"Number of open file descriptors.", | ||||
| 			nil, nil, | ||||
| 		), | ||||
| 		maxFDs: NewDesc( | ||||
| 			ns+"process_max_fds", | ||||
| 			"Maximum number of open file descriptors.", | ||||
| 			nil, nil, | ||||
| 		), | ||||
| 		vsize: NewDesc( | ||||
| 			ns+"process_virtual_memory_bytes", | ||||
| 			"Virtual memory size in bytes.", | ||||
| 			nil, nil, | ||||
| 		), | ||||
| 		maxVsize: NewDesc( | ||||
| 			ns+"process_virtual_memory_max_bytes", | ||||
| 			"Maximum amount of virtual memory available in bytes.", | ||||
| 			nil, nil, | ||||
| 		), | ||||
| 		rss: NewDesc( | ||||
| 			ns+"process_resident_memory_bytes", | ||||
| 			"Resident memory size in bytes.", | ||||
| 			nil, nil, | ||||
| 		), | ||||
| 		startTime: NewDesc( | ||||
| 			ns+"process_start_time_seconds", | ||||
| 			"Start time of the process since unix epoch in seconds.", | ||||
| 			nil, nil, | ||||
| 		), | ||||
| 	} | ||||
|  | ||||
| 	if opts.PidFn == nil { | ||||
| 		pid := os.Getpid() | ||||
| 		c.pidFn = func() (int, error) { return pid, nil } | ||||
| 	} else { | ||||
| 		c.pidFn = opts.PidFn | ||||
| 	} | ||||
|  | ||||
| 	// Set up process metric collection if supported by the runtime. | ||||
| 	if canCollectProcess() { | ||||
| 		c.collectFn = c.processCollect | ||||
| 	} else { | ||||
| 		c.collectFn = func(ch chan<- Metric) { | ||||
| 			c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // Describe returns all descriptions of the collector. | ||||
| func (c *processCollector) Describe(ch chan<- *Desc) { | ||||
| 	ch <- c.cpuTotal | ||||
| 	ch <- c.openFDs | ||||
| 	ch <- c.maxFDs | ||||
| 	ch <- c.vsize | ||||
| 	ch <- c.maxVsize | ||||
| 	ch <- c.rss | ||||
| 	ch <- c.startTime | ||||
| } | ||||
|  | ||||
| // Collect returns the current state of all metrics of the collector. | ||||
| func (c *processCollector) Collect(ch chan<- Metric) { | ||||
| 	c.collectFn(ch) | ||||
| } | ||||
|  | ||||
| func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { | ||||
| 	if !c.reportErrors { | ||||
| 		return | ||||
| 	} | ||||
| 	if desc == nil { | ||||
| 		desc = NewInvalidDesc(err) | ||||
| 	} | ||||
| 	ch <- NewInvalidMetric(desc, err) | ||||
| } | ||||
|  | ||||
| // NewPidFileFn returns a function that retrieves a pid from the specified file. | ||||
| // It is meant to be used for the PidFn field in ProcessCollectorOpts. | ||||
| func NewPidFileFn(pidFilePath string) func() (int, error) { | ||||
| 	return func() (int, error) { | ||||
| 		content, err := ioutil.ReadFile(pidFilePath) | ||||
| 		if err != nil { | ||||
| 			return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) | ||||
| 		} | ||||
| 		pid, err := strconv.Atoi(strings.TrimSpace(string(content))) | ||||
| 		if err != nil { | ||||
| 			return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) | ||||
| 		} | ||||
|  | ||||
| 		return pid, nil | ||||
| 	} | ||||
| } | ||||
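A hedged end-to-end sketch of ProcessCollectorOpts together with NewPidFileFn; the pid-file path, namespace, and port are assumptions:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Watch another process via its pid file instead of our own PID.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		PidFn:        prometheus.NewPidFileFn("/var/run/myapp.pid"),
		Namespace:    "myapp",
		ReportErrors: true, // surface collection failures as invalid metrics
	}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":9100", nil)
}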
							
								
								
									
65 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go (generated, vendored)
							| @ -1,65 +0,0 @@ | ||||
| // Copyright 2019 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // +build !windows | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"github.com/prometheus/procfs" | ||||
| ) | ||||
|  | ||||
| func canCollectProcess() bool { | ||||
| 	_, err := procfs.NewDefaultFS() | ||||
| 	return err == nil | ||||
| } | ||||
|  | ||||
| func (c *processCollector) processCollect(ch chan<- Metric) { | ||||
| 	pid, err := c.pidFn() | ||||
| 	if err != nil { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	p, err := procfs.NewProc(pid) | ||||
| 	if err != nil { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if stat, err := p.Stat(); err == nil { | ||||
| 		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) | ||||
| 		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) | ||||
| 		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) | ||||
| 		if startTime, err := stat.StartTime(); err == nil { | ||||
| 			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) | ||||
| 		} else { | ||||
| 			c.reportError(ch, c.startTime, err) | ||||
| 		} | ||||
| 	} else { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 	} | ||||
|  | ||||
| 	if fds, err := p.FileDescriptorsLen(); err == nil { | ||||
| 		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) | ||||
| 	} else { | ||||
| 		c.reportError(ch, c.openFDs, err) | ||||
| 	} | ||||
|  | ||||
| 	if limits, err := p.Limits(); err == nil { | ||||
| 		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) | ||||
| 		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) | ||||
| 	} else { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
116 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go (generated, vendored)
							| @ -1,116 +0,0 @@ | ||||
| // Copyright 2019 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"syscall" | ||||
| 	"unsafe" | ||||
|  | ||||
| 	"golang.org/x/sys/windows" | ||||
| ) | ||||
|  | ||||
| func canCollectProcess() bool { | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	modpsapi    = syscall.NewLazyDLL("psapi.dll") | ||||
| 	modkernel32 = syscall.NewLazyDLL("kernel32.dll") | ||||
|  | ||||
| 	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo") | ||||
| 	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") | ||||
| ) | ||||
|  | ||||
| type processMemoryCounters struct { | ||||
| 	// System interface description | ||||
| 	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex | ||||
|  | ||||
| 	// Refer to the Golang internal implementation | ||||
| 	// https://golang.org/src/internal/syscall/windows/psapi_windows.go | ||||
| 	_                          uint32 | ||||
| 	PageFaultCount             uint32 | ||||
| 	PeakWorkingSetSize         uintptr | ||||
| 	WorkingSetSize             uintptr | ||||
| 	QuotaPeakPagedPoolUsage    uintptr | ||||
| 	QuotaPagedPoolUsage        uintptr | ||||
| 	QuotaPeakNonPagedPoolUsage uintptr | ||||
| 	QuotaNonPagedPoolUsage     uintptr | ||||
| 	PagefileUsage              uintptr | ||||
| 	PeakPagefileUsage          uintptr | ||||
| 	PrivateUsage               uintptr | ||||
| } | ||||
|  | ||||
| func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { | ||||
| 	mem := processMemoryCounters{} | ||||
| 	r1, _, err := procGetProcessMemoryInfo.Call( | ||||
| 		uintptr(handle), | ||||
| 		uintptr(unsafe.Pointer(&mem)), | ||||
| 		uintptr(unsafe.Sizeof(mem)), | ||||
| 	) | ||||
| 	if r1 != 1 { | ||||
| 		return mem, err | ||||
| 	} else { | ||||
| 		return mem, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func getProcessHandleCount(handle windows.Handle) (uint32, error) { | ||||
| 	var count uint32 | ||||
| 	r1, _, err := procGetProcessHandleCount.Call( | ||||
| 		uintptr(handle), | ||||
| 		uintptr(unsafe.Pointer(&count)), | ||||
| 	) | ||||
| 	if r1 != 1 { | ||||
| 		return 0, err | ||||
| 	} else { | ||||
| 		return count, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *processCollector) processCollect(ch chan<- Metric) { | ||||
| 	h, err := windows.GetCurrentProcess() | ||||
| 	if err != nil { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	var startTime, exitTime, kernelTime, userTime windows.Filetime | ||||
| 	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) | ||||
| 	if err != nil { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 		return | ||||
| 	} | ||||
| 	ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) | ||||
| 	ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) | ||||
|  | ||||
| 	mem, err := getProcessMemoryInfo(h) | ||||
| 	if err != nil { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 		return | ||||
| 	} | ||||
| 	ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) | ||||
| 	ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) | ||||
|  | ||||
| 	handles, err := getProcessHandleCount(h) | ||||
| 	if err != nil { | ||||
| 		c.reportError(ch, nil, err) | ||||
| 		return | ||||
| 	} | ||||
| 	ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) | ||||
| 	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. | ||||
| } | ||||
|  | ||||
| func fileTimeToSeconds(ft windows.Filetime) float64 { | ||||
| 	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 | ||||
| } | ||||
							
								
								
									
376 vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go (generated, vendored)
							| @ -1,376 +0,0 @@ | ||||
| // Copyright 2018 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // Package promauto provides alternative constructors for the fundamental | ||||
| // Prometheus metric types and their …Vec and …Func variants. The difference from | ||||
| // their counterparts in the prometheus package is that the promauto | ||||
| // constructors return Collectors that are already registered with a | ||||
| // registry. There are two sets of constructors. The constructors in the first | ||||
| // set are top-level functions, while the constructors in the other set are | ||||
| // methods of the Factory type. The top-level functions return Collectors | ||||
| // registered with the global registry (prometheus.DefaultRegisterer), while the | ||||
| // methods return Collectors registered with the registry the Factory was | ||||
| // constructed with. All constructors panic if the registration fails. | ||||
| // | ||||
| // The following example is a complete program to create a histogram of normally | ||||
| // distributed random numbers from the math/rand package: | ||||
| // | ||||
| //      package main | ||||
| // | ||||
| //      import ( | ||||
| //              "math/rand" | ||||
| //              "net/http" | ||||
| // | ||||
| //              "github.com/prometheus/client_golang/prometheus" | ||||
| //              "github.com/prometheus/client_golang/prometheus/promauto" | ||||
| //              "github.com/prometheus/client_golang/prometheus/promhttp" | ||||
| //      ) | ||||
| // | ||||
| //      var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ | ||||
| //              Name:    "random_numbers", | ||||
| //              Help:    "A histogram of normally distributed random numbers.", | ||||
| //              Buckets: prometheus.LinearBuckets(-3, .1, 61), | ||||
| //      }) | ||||
| // | ||||
| //      func Random() { | ||||
| //              for { | ||||
| //                      histogram.Observe(rand.NormFloat64()) | ||||
| //              } | ||||
| //      } | ||||
| // | ||||
| //      func main() { | ||||
| //              go Random() | ||||
| //              http.Handle("/metrics", promhttp.Handler()) | ||||
| //              http.ListenAndServe(":1971", nil) | ||||
| //      } | ||||
| // | ||||
| // Prometheus's version of a minimal hello-world program: | ||||
| // | ||||
| //      package main | ||||
| // | ||||
| //      import ( | ||||
| //      	"fmt" | ||||
| //      	"net/http" | ||||
| // | ||||
| //      	"github.com/prometheus/client_golang/prometheus" | ||||
| //      	"github.com/prometheus/client_golang/prometheus/promauto" | ||||
| //      	"github.com/prometheus/client_golang/prometheus/promhttp" | ||||
| //      ) | ||||
| // | ||||
| //      func main() { | ||||
| //      	http.Handle("/", promhttp.InstrumentHandlerCounter( | ||||
| //      		promauto.NewCounterVec( | ||||
| //      			prometheus.CounterOpts{ | ||||
| //      				Name: "hello_requests_total", | ||||
| //      				Help: "Total number of hello-world requests by HTTP code.", | ||||
| //      			}, | ||||
| //      			[]string{"code"}, | ||||
| //      		), | ||||
| //      		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| //      			fmt.Fprint(w, "Hello, world!") | ||||
| //      		}), | ||||
| //      	)) | ||||
| //      	http.Handle("/metrics", promhttp.Handler()) | ||||
| //      	http.ListenAndServe(":1971", nil) | ||||
| //      } | ||||
| // | ||||
| // A Factory is created with the With(prometheus.Registerer) function, which | ||||
| // enables two usage patterns. With(prometheus.Registerer) can be called once per | ||||
| // line: | ||||
| // | ||||
| //        var ( | ||||
| //        	reg           = prometheus.NewRegistry() | ||||
| //        	randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ | ||||
| //        		Name:    "random_numbers", | ||||
| //        		Help:    "A histogram of normally distributed random numbers.", | ||||
| //        		Buckets: prometheus.LinearBuckets(-3, .1, 61), | ||||
| //        	}) | ||||
| //        	requestCount = promauto.With(reg).NewCounterVec( | ||||
| //        		prometheus.CounterOpts{ | ||||
| //        			Name: "http_requests_total", | ||||
| //        			Help: "Total number of HTTP requests by status code and method.", | ||||
| //        		}, | ||||
| //        		[]string{"code", "method"}, | ||||
| //        	) | ||||
| //        ) | ||||
| // | ||||
| // Or it can be used to create a Factory once to be used multiple times: | ||||
| // | ||||
| //        var ( | ||||
| //        	reg           = prometheus.NewRegistry() | ||||
| //        	factory       = promauto.With(reg) | ||||
| //        	randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ | ||||
| //        		Name:    "random_numbers", | ||||
| //        		Help:    "A histogram of normally distributed random numbers.", | ||||
| //        		Buckets: prometheus.LinearBuckets(-3, .1, 61), | ||||
| //        	}) | ||||
| //        	requestCount = factory.NewCounterVec( | ||||
| //        		prometheus.CounterOpts{ | ||||
| //        			Name: "http_requests_total", | ||||
| //        			Help: "Total number of HTTP requests by status code and method.", | ||||
| //        		}, | ||||
| //        		[]string{"code", "method"}, | ||||
| //        	) | ||||
| //        ) | ||||
| // | ||||
| // This appears very handy. So why are these constructors locked away in a | ||||
| // separate package? | ||||
| // | ||||
| // The main problem is that registration may fail, e.g. if a metric inconsistent | ||||
| // with or equal to the newly to be registered one is already registered. | ||||
| // Therefore, the Register method in the prometheus.Registerer interface returns | ||||
| // an error, and the same is the case for the top-level prometheus.Register | ||||
| // function that registers with the global registry. The prometheus package also | ||||
| // provides MustRegister versions for both. They panic if the registration | ||||
| // fails, and they clearly call this out by using the Must… idiom. Panicking is | ||||
| // problematic in this case because it doesn't just happen on input provided by | ||||
| // the caller that is invalid on its own. Things are a bit more subtle here: | ||||
| // Metric creation and registration tend to be spread widely over the | ||||
| // codebase. It can easily happen that an incompatible metric is added to an | ||||
| // unrelated part of the code, and suddenly code that used to work perfectly | ||||
| // fine starts to panic (provided that the registration of the newly added | ||||
| // metric happens before the registration of the previously existing | ||||
| // metric). This may come as an even bigger surprise with the global registry, | ||||
| // where simply importing another package can trigger a panic (if the newly | ||||
| // imported package registers metrics in its init function). At least, in the | ||||
| // prometheus package, creation of metrics and other collectors is separate from | ||||
| // registration. You first create the metric, and then you decide explicitly if | ||||
| // you want to register it with a local or the global registry, and if you want | ||||
| // to handle the error or risk a panic. With the constructors in the promauto | ||||
| // package, registration is automatic, and if it fails, it will always | ||||
| // panic. Furthermore, the constructors will often be called in the var section | ||||
| // of a file, which means that panicking will happen as a side effect of merely | ||||
| // importing a package. | ||||
| // | ||||
| // A separate package allows conservative users to entirely ignore it. And | ||||
| // whoever wants to use it will do so explicitly, with an opportunity to read | ||||
| // this warning. | ||||
| // | ||||
| // Enjoy promauto responsibly! | ||||
| package promauto | ||||
|  | ||||
| import "github.com/prometheus/client_golang/prometheus" | ||||
|  | ||||
| // NewCounter works like the function of the same name in the prometheus package | ||||
| // but it automatically registers the Counter with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. | ||||
| func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { | ||||
| 	return With(prometheus.DefaultRegisterer).NewCounter(opts) | ||||
| } | ||||
|  | ||||
| // NewCounterVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the CounterVec with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewCounterVec | ||||
| // panics. | ||||
| func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { | ||||
| 	return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames) | ||||
| } | ||||
|  | ||||
| // NewCounterFunc works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the CounterFunc with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc | ||||
| // panics. | ||||
| func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { | ||||
| 	return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function) | ||||
| } | ||||
|  | ||||
| // NewGauge works like the function of the same name in the prometheus package | ||||
| // but it automatically registers the Gauge with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. | ||||
| func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { | ||||
| 	return With(prometheus.DefaultRegisterer).NewGauge(opts) | ||||
| } | ||||
|  | ||||
| // NewGaugeVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the GaugeVec with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. | ||||
| func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { | ||||
| 	return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames) | ||||
| } | ||||
|  | ||||
| // NewGaugeFunc works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the GaugeFunc with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. | ||||
| func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { | ||||
| 	return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function) | ||||
| } | ||||
|  | ||||
| // NewSummary works like the function of the same name in the prometheus package | ||||
| // but it automatically registers the Summary with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. | ||||
| func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { | ||||
| 	return With(prometheus.DefaultRegisterer).NewSummary(opts) | ||||
| } | ||||
|  | ||||
| // NewSummaryVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the SummaryVec with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec | ||||
| // panics. | ||||
| func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { | ||||
| 	return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames) | ||||
| } | ||||
|  | ||||
| // NewHistogram works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the Histogram with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. | ||||
| func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { | ||||
| 	return With(prometheus.DefaultRegisterer).NewHistogram(opts) | ||||
| } | ||||
|  | ||||
| // NewHistogramVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the HistogramVec with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec | ||||
| // panics. | ||||
| func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { | ||||
| 	return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames) | ||||
| } | ||||
|  | ||||
| // NewUntypedFunc works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the UntypedFunc with the | ||||
| // prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc | ||||
| // panics. | ||||
| func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { | ||||
| 	return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function) | ||||
| } | ||||
|  | ||||
| // Factory provides factory methods to create Collectors that are automatically | ||||
| // registered with a Registerer. Create a Factory with the With function, | ||||
| // providing a Registerer to auto-register created Collectors with. The zero | ||||
| // value of a Factory creates Collectors that are not registered with any | ||||
| // Registerer. All methods of the Factory panic if the registration fails. | ||||
| type Factory struct { | ||||
| 	r prometheus.Registerer | ||||
| } | ||||
|  | ||||
| // With creates a Factory using the provided Registerer for registration of the | ||||
| // created Collectors. If the provided Registerer is nil, the returned Factory | ||||
| // creates Collectors that are not registered with any Registerer. | ||||
| func With(r prometheus.Registerer) Factory { return Factory{r} } | ||||
|  | ||||
| // NewCounter works like the function of the same name in the prometheus package | ||||
| // but it automatically registers the Counter with the Factory's Registerer. | ||||
| func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter { | ||||
| 	c := prometheus.NewCounter(opts) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(c) | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // NewCounterVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the CounterVec with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { | ||||
| 	c := prometheus.NewCounterVec(opts, labelNames) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(c) | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // NewCounterFunc works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the CounterFunc with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { | ||||
| 	c := prometheus.NewCounterFunc(opts, function) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(c) | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // NewGauge works like the function of the same name in the prometheus package | ||||
| // but it automatically registers the Gauge with the Factory's Registerer. | ||||
| func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { | ||||
| 	g := prometheus.NewGauge(opts) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(g) | ||||
| 	} | ||||
| 	return g | ||||
| } | ||||
|  | ||||
| // NewGaugeVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the GaugeVec with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { | ||||
| 	g := prometheus.NewGaugeVec(opts, labelNames) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(g) | ||||
| 	} | ||||
| 	return g | ||||
| } | ||||
|  | ||||
| // NewGaugeFunc works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the GaugeFunc with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { | ||||
| 	g := prometheus.NewGaugeFunc(opts, function) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(g) | ||||
| 	} | ||||
| 	return g | ||||
| } | ||||
|  | ||||
| // NewSummary works like the function of the same name in the prometheus package | ||||
| // but it automatically registers the Summary with the Factory's Registerer. | ||||
| func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { | ||||
| 	s := prometheus.NewSummary(opts) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(s) | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // NewSummaryVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the SummaryVec with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { | ||||
| 	s := prometheus.NewSummaryVec(opts, labelNames) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(s) | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // NewHistogram works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the Histogram with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { | ||||
| 	h := prometheus.NewHistogram(opts) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(h) | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| // NewHistogramVec works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the HistogramVec with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { | ||||
| 	h := prometheus.NewHistogramVec(opts, labelNames) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(h) | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| // NewUntypedFunc works like the function of the same name in the prometheus | ||||
| // package but it automatically registers the UntypedFunc with the Factory's | ||||
| // Registerer. | ||||
| func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { | ||||
| 	u := prometheus.NewUntypedFunc(opts, function) | ||||
| 	if f.r != nil { | ||||
| 		f.r.MustRegister(u) | ||||
| 	} | ||||
| 	return u | ||||
| } | ||||
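For context, the Factory methods above (assuming the surrounding file is client_golang's promauto package) behave like the plain prometheus constructors except that every metric they create is registered immediately with the Factory's Registerer. A minimal usage sketch; the metric name is illustrative:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// A dedicated registry; promauto.With binds the Factory to it.
	reg := prometheus.NewRegistry()
	factory := promauto.With(reg)

	// NewCounterVec registers the vector with reg before returning it.
	requests := factory.NewCounterVec(prometheus.CounterOpts{
		Name: "myapp_requests_total", // hypothetical metric name
		Help: "Total requests handled, partitioned by handler.",
	}, []string{"handler"})

	requests.WithLabelValues("index").Inc()
}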
							
								
								
									
370	vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go (generated, vendored)
							| @ -1,370 +0,0 @@ | ||||
| // Copyright 2017 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package promhttp | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"io" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	closeNotifier = 1 << iota | ||||
| 	flusher | ||||
| 	hijacker | ||||
| 	readerFrom | ||||
| 	pusher | ||||
| ) | ||||
|  | ||||
| type delegator interface { | ||||
| 	http.ResponseWriter | ||||
|  | ||||
| 	Status() int | ||||
| 	Written() int64 | ||||
| } | ||||
|  | ||||
| type responseWriterDelegator struct { | ||||
| 	http.ResponseWriter | ||||
|  | ||||
| 	status             int | ||||
| 	written            int64 | ||||
| 	wroteHeader        bool | ||||
| 	observeWriteHeader func(int) | ||||
| } | ||||
|  | ||||
| func (r *responseWriterDelegator) Status() int { | ||||
| 	return r.status | ||||
| } | ||||
|  | ||||
| func (r *responseWriterDelegator) Written() int64 { | ||||
| 	return r.written | ||||
| } | ||||
|  | ||||
| func (r *responseWriterDelegator) WriteHeader(code int) { | ||||
| 	if r.observeWriteHeader != nil && !r.wroteHeader { | ||||
| 		// Only call observeWriteHeader for the 1st time. It's a bug if | ||||
| 		// WriteHeader is called more than once, but we want to protect | ||||
| 		// against it here. Note that we still delegate the WriteHeader | ||||
| 		// to the original ResponseWriter to not mask the bug from it. | ||||
| 		r.observeWriteHeader(code) | ||||
| 	} | ||||
| 	r.status = code | ||||
| 	r.wroteHeader = true | ||||
| 	r.ResponseWriter.WriteHeader(code) | ||||
| } | ||||
|  | ||||
| func (r *responseWriterDelegator) Write(b []byte) (int, error) { | ||||
| 	// If applicable, call WriteHeader here so that observeWriteHeader is | ||||
| 	// handled appropriately. | ||||
| 	if !r.wroteHeader { | ||||
| 		r.WriteHeader(http.StatusOK) | ||||
| 	} | ||||
| 	n, err := r.ResponseWriter.Write(b) | ||||
| 	r.written += int64(n) | ||||
| 	return n, err | ||||
| } | ||||
|  | ||||
| type closeNotifierDelegator struct{ *responseWriterDelegator } | ||||
| type flusherDelegator struct{ *responseWriterDelegator } | ||||
| type hijackerDelegator struct{ *responseWriterDelegator } | ||||
| type readerFromDelegator struct{ *responseWriterDelegator } | ||||
| type pusherDelegator struct{ *responseWriterDelegator } | ||||
|  | ||||
| func (d closeNotifierDelegator) CloseNotify() <-chan bool { | ||||
| 	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to | ||||
| 	//remove support from client_golang yet. | ||||
| 	return d.ResponseWriter.(http.CloseNotifier).CloseNotify() | ||||
| } | ||||
| func (d flusherDelegator) Flush() { | ||||
| 	// If applicable, call WriteHeader here so that observeWriteHeader is | ||||
| 	// handled appropriately. | ||||
| 	if !d.wroteHeader { | ||||
| 		d.WriteHeader(http.StatusOK) | ||||
| 	} | ||||
| 	d.ResponseWriter.(http.Flusher).Flush() | ||||
| } | ||||
| func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { | ||||
| 	return d.ResponseWriter.(http.Hijacker).Hijack() | ||||
| } | ||||
| func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { | ||||
| 	// If applicable, call WriteHeader here so that observeWriteHeader is | ||||
| 	// handled appropriately. | ||||
| 	if !d.wroteHeader { | ||||
| 		d.WriteHeader(http.StatusOK) | ||||
| 	} | ||||
| 	n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) | ||||
| 	d.written += n | ||||
| 	return n, err | ||||
| } | ||||
| func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { | ||||
| 	return d.ResponseWriter.(http.Pusher).Push(target, opts) | ||||
| } | ||||
|  | ||||
| var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) | ||||
|  | ||||
| func init() { | ||||
| 	// TODO(beorn7): Code generation would help here. | ||||
| 	pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 | ||||
| 		return d | ||||
| 	} | ||||
| 	pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 | ||||
| 		return closeNotifierDelegator{d} | ||||
| 	} | ||||
| 	pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 | ||||
| 		return flusherDelegator{d} | ||||
| 	} | ||||
| 	pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 | ||||
| 		return hijackerDelegator{d} | ||||
| 	} | ||||
| 	pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Hijacker | ||||
| 			http.CloseNotifier | ||||
| 		}{d, hijackerDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 		}{d, hijackerDelegator{d}, flusherDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 | ||||
| 		return readerFromDelegator{d} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			io.ReaderFrom | ||||
| 			http.CloseNotifier | ||||
| 		}{d, readerFromDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			io.ReaderFrom | ||||
| 			http.Flusher | ||||
| 		}{d, readerFromDelegator{d}, flusherDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			io.ReaderFrom | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 		}{d, readerFromDelegator{d}, hijackerDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 			http.CloseNotifier | ||||
| 		}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 | ||||
| 		return pusherDelegator{d} | ||||
| 	} | ||||
| 	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			http.Flusher | ||||
| 		}{d, pusherDelegator{d}, flusherDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			http.Hijacker | ||||
| 		}{d, pusherDelegator{d}, hijackerDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			http.Hijacker | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 			http.Flusher | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} | ||||
| 	} | ||||
| 	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 | ||||
| 		return struct { | ||||
| 			*responseWriterDelegator | ||||
| 			http.Pusher | ||||
| 			io.ReaderFrom | ||||
| 			http.Hijacker | ||||
| 			http.Flusher | ||||
| 			http.CloseNotifier | ||||
| 		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { | ||||
| 	d := &responseWriterDelegator{ | ||||
| 		ResponseWriter:     w, | ||||
| 		observeWriteHeader: observeWriteHeaderFunc, | ||||
| 	} | ||||
|  | ||||
| 	id := 0 | ||||
| 	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to | ||||
| 	//remove support from client_golang yet. | ||||
| 	if _, ok := w.(http.CloseNotifier); ok { | ||||
| 		id += closeNotifier | ||||
| 	} | ||||
| 	if _, ok := w.(http.Flusher); ok { | ||||
| 		id += flusher | ||||
| 	} | ||||
| 	if _, ok := w.(http.Hijacker); ok { | ||||
| 		id += hijacker | ||||
| 	} | ||||
| 	if _, ok := w.(io.ReaderFrom); ok { | ||||
| 		id += readerFrom | ||||
| 	} | ||||
| 	if _, ok := w.(http.Pusher); ok { | ||||
| 		id += pusher | ||||
| 	} | ||||
|  | ||||
| 	return pickDelegator[id](d) | ||||
| } | ||||
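The init table above enumerates all 32 combinations of optional ResponseWriter interfaces, and newDelegator detects which ones the concrete writer supports before picking the matching wrapper. A reduced, standalone sketch of that detection idea (not part of the vendored code):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// capabilities reports which optional interfaces w supports, mirroring the
// type assertions performed by newDelegator.
func capabilities(w http.ResponseWriter) []string {
	var caps []string
	if _, ok := w.(http.Flusher); ok {
		caps = append(caps, "Flusher")
	}
	if _, ok := w.(http.Hijacker); ok {
		caps = append(caps, "Hijacker")
	}
	if _, ok := w.(io.ReaderFrom); ok {
		caps = append(caps, "ReaderFrom")
	}
	if _, ok := w.(http.Pusher); ok {
		caps = append(caps, "Pusher")
	}
	return caps
}

func main() {
	rec := httptest.NewRecorder() // implements Flusher, but not Hijacker or Pusher
	fmt.Println(capabilities(rec))
}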
							
								
								
									
383	vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (generated, vendored)
							| @ -1,383 +0,0 @@ | ||||
| // Copyright 2016 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // Package promhttp provides tooling around HTTP servers and clients. | ||||
| // | ||||
| // First, the package allows the creation of http.Handler instances to expose | ||||
| // Prometheus metrics via HTTP. promhttp.Handler acts on the | ||||
| // prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a | ||||
| // custom registry or anything that implements the Gatherer interface. It also | ||||
| // allows the creation of handlers that act differently on errors or allow | ||||
| // errors to be logged. | ||||
| // | ||||
| // Second, the package provides tooling to instrument instances of http.Handler | ||||
| // via middleware. Middleware wrappers follow the naming scheme | ||||
| // InstrumentHandlerX, where X describes the intended use of the middleware. | ||||
| // See each function's doc comment for specific details. | ||||
| // | ||||
| // Finally, the package allows for an http.RoundTripper to be instrumented via | ||||
| // middleware. Middleware wrappers follow the naming scheme | ||||
| // InstrumentRoundTripperX, where X describes the intended use of the | ||||
| // middleware. See each function's doc comment for specific details. | ||||
| package promhttp | ||||
|  | ||||
| import ( | ||||
| 	"compress/gzip" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/prometheus/common/expfmt" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	contentTypeHeader     = "Content-Type" | ||||
| 	contentEncodingHeader = "Content-Encoding" | ||||
| 	acceptEncodingHeader  = "Accept-Encoding" | ||||
| ) | ||||
|  | ||||
| var gzipPool = sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		return gzip.NewWriter(nil) | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| // Handler returns an http.Handler for the prometheus.DefaultGatherer, using | ||||
| // default HandlerOpts, i.e. it reports the first error as an HTTP error, it has | ||||
| // no error logging, and it applies compression if requested by the client. | ||||
| // | ||||
| // The returned http.Handler is already instrumented using the | ||||
| // InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you | ||||
| // create multiple http.Handlers by separate calls of the Handler function, the | ||||
| // metrics used for instrumentation will be shared between them, providing | ||||
| // global scrape counts. | ||||
| // | ||||
| // This function is meant to cover the bulk of basic use cases. If you are doing | ||||
| // anything that requires more customization (including using a non-default | ||||
| // Gatherer, different instrumentation, and non-default HandlerOpts), use the | ||||
| // HandlerFor function. See there for details. | ||||
| func Handler() http.Handler { | ||||
| 	return InstrumentMetricHandler( | ||||
| 		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), | ||||
| 	) | ||||
| } | ||||
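As the doc comment notes, Handler covers the bulk of basic use cases; a minimal sketch of exposing it, with an illustrative listen address:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serve the default registry's metrics on /metrics.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil)) // illustrative address
}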
|  | ||||
| // HandlerFor returns an uninstrumented http.Handler for the provided | ||||
| // Gatherer. The behavior of the Handler is defined by the provided | ||||
| // HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom | ||||
| // Gatherers, with non-default HandlerOpts, and/or with custom (or no) | ||||
| // instrumentation. Use the InstrumentMetricHandler function to apply the same | ||||
| // kind of instrumentation as it is used by the Handler function. | ||||
| func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { | ||||
| 	var ( | ||||
| 		inFlightSem chan struct{} | ||||
| 		errCnt      = prometheus.NewCounterVec( | ||||
| 			prometheus.CounterOpts{ | ||||
| 				Name: "promhttp_metric_handler_errors_total", | ||||
| 				Help: "Total number of internal errors encountered by the promhttp metric handler.", | ||||
| 			}, | ||||
| 			[]string{"cause"}, | ||||
| 		) | ||||
| 	) | ||||
|  | ||||
| 	if opts.MaxRequestsInFlight > 0 { | ||||
| 		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) | ||||
| 	} | ||||
| 	if opts.Registry != nil { | ||||
| 		// Initialize all possibilities that can occur below. | ||||
| 		errCnt.WithLabelValues("gathering") | ||||
| 		errCnt.WithLabelValues("encoding") | ||||
| 		if err := opts.Registry.Register(errCnt); err != nil { | ||||
| 			if are, ok := err.(prometheus.AlreadyRegisteredError); ok { | ||||
| 				errCnt = are.ExistingCollector.(*prometheus.CounterVec) | ||||
| 			} else { | ||||
| 				panic(err) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { | ||||
| 		if inFlightSem != nil { | ||||
| 			select { | ||||
| 			case inFlightSem <- struct{}{}: // All good, carry on. | ||||
| 				defer func() { <-inFlightSem }() | ||||
| 			default: | ||||
| 				http.Error(rsp, fmt.Sprintf( | ||||
| 					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, | ||||
| 				), http.StatusServiceUnavailable) | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 		mfs, err := reg.Gather() | ||||
| 		if err != nil { | ||||
| 			if opts.ErrorLog != nil { | ||||
| 				opts.ErrorLog.Println("error gathering metrics:", err) | ||||
| 			} | ||||
| 			errCnt.WithLabelValues("gathering").Inc() | ||||
| 			switch opts.ErrorHandling { | ||||
| 			case PanicOnError: | ||||
| 				panic(err) | ||||
| 			case ContinueOnError: | ||||
| 				if len(mfs) == 0 { | ||||
| 					// Still report the error if no metrics have been gathered. | ||||
| 					httpError(rsp, err) | ||||
| 					return | ||||
| 				} | ||||
| 			case HTTPErrorOnError: | ||||
| 				httpError(rsp, err) | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		var contentType expfmt.Format | ||||
| 		if opts.EnableOpenMetrics { | ||||
| 			contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) | ||||
| 		} else { | ||||
| 			contentType = expfmt.Negotiate(req.Header) | ||||
| 		} | ||||
| 		header := rsp.Header() | ||||
| 		header.Set(contentTypeHeader, string(contentType)) | ||||
|  | ||||
| 		w := io.Writer(rsp) | ||||
| 		if !opts.DisableCompression && gzipAccepted(req.Header) { | ||||
| 			header.Set(contentEncodingHeader, "gzip") | ||||
| 			gz := gzipPool.Get().(*gzip.Writer) | ||||
| 			defer gzipPool.Put(gz) | ||||
|  | ||||
| 			gz.Reset(w) | ||||
| 			defer gz.Close() | ||||
|  | ||||
| 			w = gz | ||||
| 		} | ||||
|  | ||||
| 		enc := expfmt.NewEncoder(w, contentType) | ||||
|  | ||||
| 		// handleError handles the error according to opts.ErrorHandling | ||||
| 		// and returns true if we have to abort after the handling. | ||||
| 		handleError := func(err error) bool { | ||||
| 			if err == nil { | ||||
| 				return false | ||||
| 			} | ||||
| 			if opts.ErrorLog != nil { | ||||
| 				opts.ErrorLog.Println("error encoding and sending metric family:", err) | ||||
| 			} | ||||
| 			errCnt.WithLabelValues("encoding").Inc() | ||||
| 			switch opts.ErrorHandling { | ||||
| 			case PanicOnError: | ||||
| 				panic(err) | ||||
| 			case HTTPErrorOnError: | ||||
| 				// We cannot really send an HTTP error at this | ||||
| 				// point because we most likely have written | ||||
| 				// something to rsp already. But at least we can | ||||
| 				// stop sending. | ||||
| 				return true | ||||
| 			} | ||||
| 			// Do nothing in all other cases, including ContinueOnError. | ||||
| 			return false | ||||
| 		} | ||||
|  | ||||
| 		for _, mf := range mfs { | ||||
| 			if handleError(enc.Encode(mf)) { | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 		if closer, ok := enc.(expfmt.Closer); ok { | ||||
| 			// This in particular takes care of the final "# EOF\n" line for OpenMetrics. | ||||
| 			if handleError(closer.Close()) { | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	if opts.Timeout <= 0 { | ||||
| 		return h | ||||
| 	} | ||||
| 	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( | ||||
| 		"Exceeded configured timeout of %v.\n", | ||||
| 		opts.Timeout, | ||||
| 	)) | ||||
| } | ||||
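A minimal sketch of HandlerFor with a custom (non-default) registry and zero-value HandlerOpts; the registered counter is purely illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry instead of the default Gatherer/Registerer.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_events_total", // hypothetical metric
		Help: "Events observed by the demo.",
	}))

	// Uninstrumented handler for the custom registry; the zero-value opts
	// are a reasonable default per the HandlerOpts documentation.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil)) // illustrative address
}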
|  | ||||
| // InstrumentMetricHandler is usually used with an http.Handler returned by the | ||||
| // HandlerFor function. It instruments the provided http.Handler with two | ||||
| // metrics: A counter vector "promhttp_metric_handler_requests_total" to count | ||||
| // scrapes partitioned by HTTP status code, and a gauge | ||||
| // "promhttp_metric_handler_requests_in_flight" to track the number of | ||||
| // simultaneous scrapes. This function idempotently registers collectors for | ||||
| // both metrics with the provided Registerer. It panics if the registration | ||||
| // fails. The provided metrics are useful to see how many scrapes hit the | ||||
| // monitored target (which could be from different Prometheus servers or other | ||||
| // scrapers), and how often they overlap (which would result in more than one | ||||
| // scrape in flight at the same time). Note that the scrapes-in-flight gauge | ||||
| // will contain the scrape by which it is exposed, while the scrape counter will | ||||
| // only get incremented after the scrape is complete (as only then the status | ||||
| // code is known). For tracking scrape durations, use the | ||||
| // "scrape_duration_seconds" gauge created by the Prometheus server upon each | ||||
| // scrape. | ||||
| func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { | ||||
| 	cnt := prometheus.NewCounterVec( | ||||
| 		prometheus.CounterOpts{ | ||||
| 			Name: "promhttp_metric_handler_requests_total", | ||||
| 			Help: "Total number of scrapes by HTTP status code.", | ||||
| 		}, | ||||
| 		[]string{"code"}, | ||||
| 	) | ||||
| 	// Initialize the most likely HTTP status codes. | ||||
| 	cnt.WithLabelValues("200") | ||||
| 	cnt.WithLabelValues("500") | ||||
| 	cnt.WithLabelValues("503") | ||||
| 	if err := reg.Register(cnt); err != nil { | ||||
| 		if are, ok := err.(prometheus.AlreadyRegisteredError); ok { | ||||
| 			cnt = are.ExistingCollector.(*prometheus.CounterVec) | ||||
| 		} else { | ||||
| 			panic(err) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	gge := prometheus.NewGauge(prometheus.GaugeOpts{ | ||||
| 		Name: "promhttp_metric_handler_requests_in_flight", | ||||
| 		Help: "Current number of scrapes being served.", | ||||
| 	}) | ||||
| 	if err := reg.Register(gge); err != nil { | ||||
| 		if are, ok := err.(prometheus.AlreadyRegisteredError); ok { | ||||
| 			gge = are.ExistingCollector.(prometheus.Gauge) | ||||
| 		} else { | ||||
| 			panic(err) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) | ||||
| } | ||||
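To add the same scrape instrumentation to a custom-registry handler, the result of HandlerFor can be wrapped as described above; a short sketch with an illustrative address:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// InstrumentMetricHandler registers the two scrape metrics with reg and
	// wraps the handler returned by HandlerFor.
	h := promhttp.InstrumentMetricHandler(
		reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
	)
	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":8080", nil)) // illustrative address
}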
|  | ||||
| // HandlerErrorHandling defines how a Handler serving metrics will handle | ||||
| // errors. | ||||
| type HandlerErrorHandling int | ||||
|  | ||||
| // These constants cause handlers serving metrics to behave as described if | ||||
| // errors are encountered. | ||||
| const ( | ||||
| 	// Serve an HTTP status code 500 upon the first error | ||||
| 	// encountered. Report the error message in the body. Note that HTTP | ||||
| 	// errors cannot be served anymore once the beginning of a regular | ||||
| 	// payload has been sent. Thus, in the (unlikely) case that encoding the | ||||
| 	// payload into the negotiated wire format fails, serving the response | ||||
| 	// will simply be aborted. Set an ErrorLog in HandlerOpts to detect | ||||
| 	// those errors. | ||||
| 	HTTPErrorOnError HandlerErrorHandling = iota | ||||
| 	// Ignore errors and try to serve as many metrics as possible.  However, | ||||
| 	// if no metrics can be served, serve an HTTP status code 500 and the | ||||
| 	// last error message in the body. Only use this in deliberate "best | ||||
| 	// effort" metrics collection scenarios. In this case, it is highly | ||||
| 	// recommended to provide other means of detecting errors: By setting an | ||||
| 	// ErrorLog in HandlerOpts, the errors are logged. By providing a | ||||
| 	// Registry in HandlerOpts, the exposed metrics include an error counter | ||||
| 	// "promhttp_metric_handler_errors_total", which can be used for | ||||
| 	// alerts. | ||||
| 	ContinueOnError | ||||
| 	// Panic upon the first error encountered (useful for "crash only" apps). | ||||
| 	PanicOnError | ||||
| ) | ||||
|  | ||||
| // Logger is the minimal interface HandlerOpts needs for logging. Note that | ||||
| // log.Logger from the standard library implements this interface, and it is | ||||
| // easy to implement by custom loggers, if they don't do so already anyway. | ||||
| type Logger interface { | ||||
| 	Println(v ...interface{}) | ||||
| } | ||||
|  | ||||
| // HandlerOpts specifies options for how to serve metrics via an http.Handler. The | ||||
| // zero value of HandlerOpts is a reasonable default. | ||||
| type HandlerOpts struct { | ||||
| 	// ErrorLog specifies an optional Logger for errors collecting and | ||||
| 	// serving metrics. If nil, errors are not logged at all. Note that the | ||||
| 	// type of a reported error is often prometheus.MultiError, which | ||||
| 	// formats into a multi-line error string. If you want to avoid the | ||||
| 	// latter, create a Logger implementation that detects a | ||||
| 	// prometheus.MultiError and formats the contained errors into one line. | ||||
| 	ErrorLog Logger | ||||
| 	// ErrorHandling defines how errors are handled. Note that errors are | ||||
| 	// logged regardless of the configured ErrorHandling provided ErrorLog | ||||
| 	// is not nil. | ||||
| 	ErrorHandling HandlerErrorHandling | ||||
| 	// If Registry is not nil, it is used to register a metric | ||||
| 	// "promhttp_metric_handler_errors_total", partitioned by "cause". A | ||||
| 	// failed registration causes a panic. Note that this error counter is | ||||
| 	// different from the instrumentation you get from the various | ||||
| 	// InstrumentHandler... helpers. It counts errors that don't necessarily | ||||
| 	// result in a non-2xx HTTP status code. There are two typical cases: | ||||
| 	// (1) Encoding errors that only happen after streaming of the HTTP body | ||||
| 	// has already started (and the status code 200 has been sent). This | ||||
| 	// should only happen with custom collectors. (2) Collection errors with | ||||
| 	// no effect on the HTTP status code because ErrorHandling is set to | ||||
| 	// ContinueOnError. | ||||
| 	Registry prometheus.Registerer | ||||
| 	// If DisableCompression is true, the handler will never compress the | ||||
| 	// response, even if requested by the client. | ||||
| 	DisableCompression bool | ||||
| 	// The number of concurrent HTTP requests is limited to | ||||
| 	// MaxRequestsInFlight. Additional requests are responded to with 503 | ||||
| 	// Service Unavailable and a suitable message in the body. If | ||||
| 	// MaxRequestsInFlight is 0 or negative, no limit is applied. | ||||
| 	MaxRequestsInFlight int | ||||
| 	// If handling a request takes longer than Timeout, it is responded to | ||||
| // with 503 Service Unavailable and a suitable message. No timeout is | ||||
| 	// applied if Timeout is 0 or negative. Note that with the current | ||||
| 	// implementation, reaching the timeout simply ends the HTTP requests as | ||||
| 	// described above (and even that only if sending of the body hasn't | ||||
| 	// started yet), while the bulk work of gathering all the metrics keeps | ||||
| 	// running in the background (with the eventual result to be thrown | ||||
| 	// away). Until the implementation is improved, it is recommended to | ||||
| 	// implement a separate timeout in potentially slow Collectors. | ||||
| 	Timeout time.Duration | ||||
| 	// If true, the experimental OpenMetrics encoding is added to the | ||||
| 	// possible options during content negotiation. Note that Prometheus | ||||
| 	// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is | ||||
| 	// the only way to transmit exemplars. However, the move to OpenMetrics | ||||
| 	// is not completely transparent. Most notably, the values of "quantile" | ||||
| 	// labels of Summaries and "le" labels of Histograms are formatted with | ||||
| 	// a trailing ".0" if they would otherwise look like integer numbers | ||||
| 	// (which changes the identity of the resulting series on the Prometheus | ||||
| 	// server). | ||||
| 	EnableOpenMetrics bool | ||||
| } | ||||
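A sketch of a non-zero HandlerOpts wiring several of the fields documented above; all values are illustrative:

package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// log.Logger satisfies the Logger interface via Println.
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError,
		Registry:            reg, // exposes promhttp_metric_handler_errors_total
		MaxRequestsInFlight: 3,
		Timeout:             10 * time.Second,
	})
	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":8080", nil)) // illustrative address
}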
|  | ||||
| // gzipAccepted returns whether the client will accept gzip-encoded content. | ||||
| func gzipAccepted(header http.Header) bool { | ||||
| 	a := header.Get(acceptEncodingHeader) | ||||
| 	parts := strings.Split(a, ",") | ||||
| 	for _, part := range parts { | ||||
| 		part = strings.TrimSpace(part) | ||||
| 		if part == "gzip" || strings.HasPrefix(part, "gzip;") { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // httpError removes any content-encoding header and then calls http.Error with | ||||
| // the provided error and http.StatusInternalServerError. Error contents are | ||||
| // supposed to be uncompressed plain text. Same as with a plain http.Error, this | ||||
| // must not be called if the header or any payload has already been sent. | ||||
| func httpError(rsp http.ResponseWriter, err error) { | ||||
| 	rsp.Header().Del(contentEncodingHeader) | ||||
| 	http.Error( | ||||
| 		rsp, | ||||
| 		"An error has occurred while serving metrics:\n\n"+err.Error(), | ||||
| 		http.StatusInternalServerError, | ||||
| 	) | ||||
| } | ||||
							
								
								
									
219	vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (generated, vendored)
							| @ -1,219 +0,0 @@ | ||||
| // Copyright 2017 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package promhttp | ||||
|  | ||||
| import ( | ||||
| 	"crypto/tls" | ||||
| 	"net/http" | ||||
| 	"net/http/httptrace" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus" | ||||
| ) | ||||
|  | ||||
| // The RoundTripperFunc type is an adapter to allow the use of ordinary | ||||
| // functions as RoundTrippers. If f is a function with the appropriate | ||||
| // signature, RoundTripperFunc(f) is a RoundTripper that calls f. | ||||
| type RoundTripperFunc func(req *http.Request) (*http.Response, error) | ||||
|  | ||||
| // RoundTrip implements the RoundTripper interface. | ||||
| func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { | ||||
| 	return rt(r) | ||||
| } | ||||
|  | ||||
| // InstrumentRoundTripperInFlight is a middleware that wraps the provided | ||||
| // http.RoundTripper. It sets the provided prometheus.Gauge to the number of | ||||
| // requests currently handled by the wrapped http.RoundTripper. | ||||
| // | ||||
| // See the example for InstrumentRoundTripperDuration for example usage. | ||||
| func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { | ||||
| 	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { | ||||
| 		gauge.Inc() | ||||
| 		defer gauge.Dec() | ||||
| 		return next.RoundTrip(r) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // InstrumentRoundTripperCounter is a middleware that wraps the provided | ||||
| // http.RoundTripper to observe the request result with the provided CounterVec. | ||||
| // The CounterVec must have zero, one, or two non-const non-curried labels. For | ||||
| // those, the only allowed label names are "code" and "method". The function | ||||
| // panics otherwise. Partitioning of the CounterVec happens by HTTP status code | ||||
| // and/or HTTP method if the respective instance label names are present in the | ||||
| // CounterVec. For unpartitioned counting, use a CounterVec with zero labels. | ||||
| // | ||||
| // If the wrapped RoundTripper panics or returns a non-nil error, the Counter | ||||
| // is not incremented. | ||||
| // | ||||
| // See the example for InstrumentRoundTripperDuration for example usage. | ||||
| func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { | ||||
| 	code, method := checkLabels(counter) | ||||
|  | ||||
| 	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { | ||||
| 		resp, err := next.RoundTrip(r) | ||||
| 		if err == nil { | ||||
| 			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() | ||||
| 		} | ||||
| 		return resp, err | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // InstrumentRoundTripperDuration is a middleware that wraps the provided | ||||
| // http.RoundTripper to observe the request duration with the provided | ||||
| // ObserverVec.  The ObserverVec must have zero, one, or two non-const | ||||
| // non-curried labels. For those, the only allowed label names are "code" and | ||||
| // "method". The function panics otherwise. The Observe method of the Observer | ||||
| // in the ObserverVec is called with the request duration in | ||||
| // seconds. Partitioning happens by HTTP status code and/or HTTP method if the | ||||
| // respective instance label names are present in the ObserverVec. For | ||||
| // unpartitioned observations, use an ObserverVec with zero labels. Note that | ||||
| // partitioning of Histograms is expensive and should be used judiciously. | ||||
| // | ||||
| // If the wrapped RoundTripper panics or returns a non-nil error, no values are | ||||
| // reported. | ||||
| // | ||||
| // Note that this method is only guaranteed to never observe negative durations | ||||
| // if used with Go1.9+. | ||||
| func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { | ||||
| 	code, method := checkLabels(obs) | ||||
|  | ||||
| 	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { | ||||
| 		start := time.Now() | ||||
| 		resp, err := next.RoundTrip(r) | ||||
| 		if err == nil { | ||||
| 			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) | ||||
| 		} | ||||
| 		return resp, err | ||||
| 	}) | ||||
| } | ||||
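The RoundTripper middlewares above compose by nesting; a sketch assuming hypothetical metric names and an illustrative target URL:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests", // hypothetical name
		Help: "Outgoing requests currently in flight.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_requests_total", // hypothetical name
		Help: "Outgoing requests, partitioned by status code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "client_request_duration_seconds", // hypothetical name
		Help:    "Outgoing request latencies in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
	prometheus.MustRegister(inFlight, counter, duration)

	// Nest the middlewares around the default transport, outermost first.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport))),
	}

	resp, err := client.Get("https://example.org") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}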
|  | ||||
| // InstrumentTrace is used to offer flexibility in instrumenting the available | ||||
| // httptrace.ClientTrace hook functions. Each function is passed a float64 | ||||
| // representing the time in seconds since the start of the http request. A user | ||||
| // may choose to use separately bucketed Histograms, or implement custom | ||||
| // instance labels on a per-function basis. | ||||
| type InstrumentTrace struct { | ||||
| 	GotConn              func(float64) | ||||
| 	PutIdleConn          func(float64) | ||||
| 	GotFirstResponseByte func(float64) | ||||
| 	Got100Continue       func(float64) | ||||
| 	DNSStart             func(float64) | ||||
| 	DNSDone              func(float64) | ||||
| 	ConnectStart         func(float64) | ||||
| 	ConnectDone          func(float64) | ||||
| 	TLSHandshakeStart    func(float64) | ||||
| 	TLSHandshakeDone     func(float64) | ||||
| 	WroteHeaders         func(float64) | ||||
| 	Wait100Continue      func(float64) | ||||
| 	WroteRequest         func(float64) | ||||
| } | ||||
|  | ||||
| // InstrumentRoundTripperTrace is a middleware that wraps the provided | ||||
| // RoundTripper and reports times to hook functions provided in the | ||||
| // InstrumentTrace struct. Hook functions that are not present in the provided | ||||
| // InstrumentTrace struct are ignored. Times reported to the hook functions are | ||||
| // time since the start of the request. Only with Go1.9+, those times are | ||||
| // guaranteed to never be negative. (Earlier Go versions are not using a | ||||
| // monotonic clock.) Note that partitioning of Histograms is expensive and | ||||
| // should be used judiciously. | ||||
| // | ||||
| // For hook functions that receive an error as an argument, no observations are | ||||
| // made in the event of a non-nil error value. | ||||
| // | ||||
| // See the example for InstrumentRoundTripperDuration for example usage. | ||||
| func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { | ||||
| 	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { | ||||
| 		start := time.Now() | ||||
|  | ||||
| 		trace := &httptrace.ClientTrace{ | ||||
| 			GotConn: func(_ httptrace.GotConnInfo) { | ||||
| 				if it.GotConn != nil { | ||||
| 					it.GotConn(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			PutIdleConn: func(err error) { | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 				if it.PutIdleConn != nil { | ||||
| 					it.PutIdleConn(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			DNSStart: func(_ httptrace.DNSStartInfo) { | ||||
| 				if it.DNSStart != nil { | ||||
| 					it.DNSStart(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			DNSDone: func(_ httptrace.DNSDoneInfo) { | ||||
| 				if it.DNSDone != nil { | ||||
| 					it.DNSDone(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			ConnectStart: func(_, _ string) { | ||||
| 				if it.ConnectStart != nil { | ||||
| 					it.ConnectStart(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			ConnectDone: func(_, _ string, err error) { | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 				if it.ConnectDone != nil { | ||||
| 					it.ConnectDone(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			GotFirstResponseByte: func() { | ||||
| 				if it.GotFirstResponseByte != nil { | ||||
| 					it.GotFirstResponseByte(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			Got100Continue: func() { | ||||
| 				if it.Got100Continue != nil { | ||||
| 					it.Got100Continue(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			TLSHandshakeStart: func() { | ||||
| 				if it.TLSHandshakeStart != nil { | ||||
| 					it.TLSHandshakeStart(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			TLSHandshakeDone: func(_ tls.ConnectionState, err error) { | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 				if it.TLSHandshakeDone != nil { | ||||
| 					it.TLSHandshakeDone(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			WroteHeaders: func() { | ||||
| 				if it.WroteHeaders != nil { | ||||
| 					it.WroteHeaders(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			Wait100Continue: func() { | ||||
| 				if it.Wait100Continue != nil { | ||||
| 					it.Wait100Continue(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 			WroteRequest: func(_ httptrace.WroteRequestInfo) { | ||||
| 				if it.WroteRequest != nil { | ||||
| 					it.WroteRequest(time.Since(start).Seconds()) | ||||
| 				} | ||||
| 			}, | ||||
| 		} | ||||
| 		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) | ||||
|  | ||||
| 		return next.RoundTrip(r) | ||||
| 	}) | ||||
| } | ||||
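A sketch of InstrumentRoundTripperTrace observing only the DNS hooks; the metric name and buckets are illustrative, and each observed value is seconds since the request started, as described above:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsLatency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "dns_trace_duration_seconds", // hypothetical name
		Help:    "Seconds since request start at DNS trace events.",
		Buckets: []float64{.005, .01, .025, .05},
	}, []string{"event"})
	prometheus.MustRegister(dnsLatency)

	// Hooks left nil (GotConn, TLSHandshakeStart, ...) are simply ignored.
	trace := &promhttp.InstrumentTrace{
		DNSStart: func(t float64) { dnsLatency.WithLabelValues("dns_start").Observe(t) },
		DNSDone:  func(t float64) { dnsLatency.WithLabelValues("dns_done").Observe(t) },
	}

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	resp, err := client.Get("https://example.org") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}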
							
								
								
									
458	vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go (generated, vendored)
							| @ -1,458 +0,0 @@ | ||||
| // Copyright 2017 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package promhttp | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"net/http" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus" | ||||
| ) | ||||
|  | ||||
| // magicString is used for the hacky label test in checkLabels. Remove once fixed. | ||||
| const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" | ||||
|  | ||||
| // InstrumentHandlerInFlight is a middleware that wraps the provided | ||||
| // http.Handler. It sets the provided prometheus.Gauge to the number of | ||||
| // requests currently handled by the wrapped http.Handler. | ||||
| // | ||||
| // See the example for InstrumentHandlerDuration for example usage. | ||||
| func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		g.Inc() | ||||
| 		defer g.Dec() | ||||
| 		next.ServeHTTP(w, r) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // InstrumentHandlerDuration is a middleware that wraps the provided | ||||
| // http.Handler to observe the request duration with the provided ObserverVec. | ||||
| // The ObserverVec must have valid metric and label names and must have zero, | ||||
| // one, or two non-const non-curried labels. For those, the only allowed label | ||||
| // names are "code" and "method". The function panics otherwise. The Observe | ||||
| // method of the Observer in the ObserverVec is called with the request duration | ||||
| // in seconds. Partitioning happens by HTTP status code and/or HTTP method if | ||||
| // the respective instance label names are present in the ObserverVec. For | ||||
| // unpartitioned observations, use an ObserverVec with zero labels. Note that | ||||
| // partitioning of Histograms is expensive and should be used judiciously. | ||||
| // | ||||
| // If the wrapped Handler does not set a status code, a status code of 200 is assumed. | ||||
| // | ||||
| // If the wrapped Handler panics, no values are reported. | ||||
| // | ||||
| // Note that this method is only guaranteed to never observe negative durations | ||||
| // if used with Go1.9+. | ||||
| func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { | ||||
| 	code, method := checkLabels(obs) | ||||
|  | ||||
| 	if code { | ||||
| 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 			now := time.Now() | ||||
| 			d := newDelegator(w, nil) | ||||
| 			next.ServeHTTP(d, r) | ||||
|  | ||||
| 			obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		now := time.Now() | ||||
| 		next.ServeHTTP(w, r) | ||||
| 		obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler | ||||
| // to observe the request result with the provided CounterVec. The CounterVec | ||||
| // must have valid metric and label names and must have zero, one, or two | ||||
| // non-const non-curried labels. For those, the only allowed label names are | ||||
| // "code" and "method". The function panics otherwise. Partitioning of the | ||||
| // CounterVec happens by HTTP status code and/or HTTP method if the respective | ||||
| // instance label names are present in the CounterVec. For unpartitioned | ||||
| // counting, use a CounterVec with zero labels. | ||||
| // | ||||
| // If the wrapped Handler does not set a status code, a status code of 200 is assumed. | ||||
| // | ||||
| // If the wrapped Handler panics, the Counter is not incremented. | ||||
| // | ||||
| // See the example for InstrumentHandlerDuration for example usage. | ||||
| func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { | ||||
| 	code, method := checkLabels(counter) | ||||
|  | ||||
| 	if code { | ||||
| 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 			d := newDelegator(w, nil) | ||||
| 			next.ServeHTTP(d, r) | ||||
| 			counter.With(labels(code, method, r.Method, d.Status())).Inc() | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		next.ServeHTTP(w, r) | ||||
| 		counter.With(labels(code, method, r.Method, 0)).Inc() | ||||
| 	}) | ||||
| } | ||||
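The server-side middlewares compose the same way as the client-side ones; a sketch chaining in-flight, counter, and duration instrumentation around a hypothetical handler, with illustrative metric names:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "http_in_flight_requests", // hypothetical name
		Help: "Requests currently being served.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total", // hypothetical name
		Help: "Requests, partitioned by status code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds", // hypothetical name
		Help:    "Request latencies in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(inFlight, counter, duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello")) // hypothetical handler
	})

	// Middlewares compose from the outside in.
	chained := promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerCounter(counter,
			promhttp.InstrumentHandlerDuration(duration, hello)))

	http.Handle("/", chained)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil)) // illustrative address
}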
|  | ||||
| // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided | ||||
| // http.Handler to observe with the provided ObserverVec the request duration | ||||
| // until the response headers are written. The ObserverVec must have valid | ||||
| // metric and label names and must have zero, one, or two non-const non-curried | ||||
| // labels. For those, the only allowed label names are "code" and "method". The | ||||
| // function panics otherwise. The Observe method of the Observer in the | ||||
| // ObserverVec is called with the request duration in seconds. Partitioning | ||||
| // happens by HTTP status code and/or HTTP method if the respective instance | ||||
| // label names are present in the ObserverVec. For unpartitioned observations, | ||||
| // use an ObserverVec with zero labels. Note that partitioning of Histograms is | ||||
| // expensive and should be used judiciously. | ||||
| // | ||||
| // If the wrapped Handler panics before calling WriteHeader, no value is | ||||
| // reported. | ||||
| // | ||||
| // Note that this method is only guaranteed to never observe negative durations | ||||
| // if used with Go1.9+. | ||||
| // | ||||
| // See the example for InstrumentHandlerDuration for example usage. | ||||
| func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { | ||||
| 	code, method := checkLabels(obs) | ||||
|  | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		now := time.Now() | ||||
| 		d := newDelegator(w, func(status int) { | ||||
| 			obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) | ||||
| 		}) | ||||
| 		next.ServeHTTP(d, r) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // InstrumentHandlerRequestSize is a middleware that wraps the provided | ||||
| // http.Handler to observe the request size with the provided ObserverVec. The | ||||
| // ObserverVec must have valid metric and label names and must have zero, one, | ||||
| // or two non-const non-curried labels. For those, the only allowed label names | ||||
| // are "code" and "method". The function panics otherwise. The Observe method of | ||||
| // the Observer in the ObserverVec is called with the request size in | ||||
| // bytes. Partitioning happens by HTTP status code and/or HTTP method if the | ||||
| // respective instance label names are present in the ObserverVec. For | ||||
| // unpartitioned observations, use an ObserverVec with zero labels. Note that | ||||
| // partitioning of Histograms is expensive and should be used judiciously. | ||||
| // | ||||
| // If the wrapped Handler does not set a status code, a status code of 200 is assumed. | ||||
| // | ||||
| // If the wrapped Handler panics, no values are reported. | ||||
| // | ||||
| // See the example for InstrumentHandlerDuration for example usage. | ||||
| func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { | ||||
| 	code, method := checkLabels(obs) | ||||
|  | ||||
| 	if code { | ||||
| 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 			d := newDelegator(w, nil) | ||||
| 			next.ServeHTTP(d, r) | ||||
| 			size := computeApproximateRequestSize(r) | ||||
| 			obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		next.ServeHTTP(w, r) | ||||
| 		size := computeApproximateRequestSize(r) | ||||
| 		obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // InstrumentHandlerResponseSize is a middleware that wraps the provided | ||||
| // http.Handler to observe the response size with the provided ObserverVec. The | ||||
| // ObserverVec must have valid metric and label names and must have zero, one, | ||||
| // or two non-const non-curried labels. For those, the only allowed label names | ||||
| // are "code" and "method". The function panics otherwise. The Observe method of | ||||
| // the Observer in the ObserverVec is called with the response size in | ||||
| // bytes. Partitioning happens by HTTP status code and/or HTTP method if the | ||||
| // respective instance label names are present in the ObserverVec. For | ||||
| // unpartitioned observations, use an ObserverVec with zero labels. Note that | ||||
| // partitioning of Histograms is expensive and should be used judiciously. | ||||
| // | ||||
| // If the wrapped Handler does not set a status code, a status code of 200 is assumed. | ||||
| // | ||||
| // If the wrapped Handler panics, no values are reported. | ||||
| // | ||||
| // See the example for InstrumentHandlerDuration for example usage. | ||||
| func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { | ||||
| 	code, method := checkLabels(obs) | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		d := newDelegator(w, nil) | ||||
| 		next.ServeHTTP(d, r) | ||||
| 		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
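| // Editorial sketch, not part of the vendored upstream file: the middlewares in | ||||
| // this file compose by wrapping, so request-size and response-size | ||||
| // instrumentation can be chained around one handler. The helper, metric names, | ||||
| // handler, and port below are hypothetical. | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"log" | ||||
| 	"net/http" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus" | ||||
| 	"github.com/prometheus/client_golang/prometheus/promhttp" | ||||
| ) | ||||
|  | ||||
| // instrumentSizes wraps next so that both the approximate request size and the | ||||
| // written response size are observed, partitioned by status code. | ||||
| func instrumentSizes(reg prometheus.Registerer, next http.Handler) http.Handler { | ||||
| 	reqSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{ | ||||
| 		Name:    "http_request_size_bytes", | ||||
| 		Help:    "Approximate request sizes in bytes.", | ||||
| 		Buckets: prometheus.ExponentialBuckets(100, 10, 6), | ||||
| 	}, []string{"code"}) | ||||
| 	respSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{ | ||||
| 		Name:    "http_response_size_bytes", | ||||
| 		Help:    "Response sizes in bytes.", | ||||
| 		Buckets: prometheus.ExponentialBuckets(100, 10, 6), | ||||
| 	}, []string{"code"}) | ||||
| 	reg.MustRegister(reqSize, respSize) | ||||
|  | ||||
| 	// The outer middleware observes the status code reported by the inner | ||||
| 	// delegator once the wrapped handler has run. | ||||
| 	return promhttp.InstrumentHandlerRequestSize(reqSize, | ||||
| 		promhttp.InstrumentHandlerResponseSize(respSize, next)) | ||||
| } | ||||
|  | ||||
| func main() { | ||||
| 	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		w.Write([]byte("hello")) | ||||
| 	}) | ||||
| 	http.Handle("/hello", instrumentSizes(prometheus.DefaultRegisterer, hello)) | ||||
| 	http.Handle("/metrics", promhttp.Handler()) | ||||
| 	log.Fatal(http.ListenAndServe(":8080", nil)) | ||||
| } | ||||
|  | ||||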
| // checkLabels returns whether the provided Collector has a non-const, | ||||
| // non-curried label named "code" and/or "method". It panics if the provided | ||||
| // Collector does not have a Desc or has more than one Desc or its Desc is | ||||
| // invalid. It also panics if the Collector has any non-const, non-curried | ||||
| // labels that are not named "code" or "method". | ||||
| func checkLabels(c prometheus.Collector) (code bool, method bool) { | ||||
| 	// TODO(beorn7): Remove this hacky way to check for instance labels | ||||
| 	// once Descriptors can have their dimensionality queried. | ||||
| 	var ( | ||||
| 		desc *prometheus.Desc | ||||
| 		m    prometheus.Metric | ||||
| 		pm   dto.Metric | ||||
| 		lvs  []string | ||||
| 	) | ||||
|  | ||||
| 	// Get the Desc from the Collector. | ||||
| 	descc := make(chan *prometheus.Desc, 1) | ||||
| 	c.Describe(descc) | ||||
|  | ||||
| 	select { | ||||
| 	case desc = <-descc: | ||||
| 	default: | ||||
| 		panic("no description provided by collector") | ||||
| 	} | ||||
| 	select { | ||||
| 	case <-descc: | ||||
| 		panic("more than one description provided by collector") | ||||
| 	default: | ||||
| 	} | ||||
|  | ||||
| 	close(descc) | ||||
|  | ||||
| 	// Make sure the Collector has a valid Desc by registering it with a | ||||
| 	// temporary registry. | ||||
| 	prometheus.NewRegistry().MustRegister(c) | ||||
|  | ||||
| 	// Create a ConstMetric with the Desc. Since we don't know how many | ||||
| 	// variable labels there are, keep trying with more label values until | ||||
| 	// it succeeds. | ||||
| 	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { | ||||
| 		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) | ||||
| 	} | ||||
|  | ||||
| 	// Write out the metric into a proto message and look at the labels. | ||||
| 	// If the value is not the magicString, it is a constLabel, which doesn't interest us. | ||||
| 	// If the label is curried, it doesn't interest us. | ||||
| 	// In all other cases, only "code" or "method" is allowed. | ||||
| 	if err := m.Write(&pm); err != nil { | ||||
| 		panic("error checking metric for labels") | ||||
| 	} | ||||
| 	for _, label := range pm.Label { | ||||
| 		name, value := label.GetName(), label.GetValue() | ||||
| 		if value != magicString || isLabelCurried(c, name) { | ||||
| 			continue | ||||
| 		} | ||||
| 		switch name { | ||||
| 		case "code": | ||||
| 			code = true | ||||
| 		case "method": | ||||
| 			method = true | ||||
| 		default: | ||||
| 			panic("metric partitioned with non-supported labels") | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func isLabelCurried(c prometheus.Collector, label string) bool { | ||||
| 	// This is even hackier than the label test above. | ||||
| 	// We essentially try to curry again and see if it works. | ||||
| 	// But for that, we need to type-convert to the two | ||||
| 	// types we use here, ObserverVec or *CounterVec. | ||||
| 	switch v := c.(type) { | ||||
| 	case *prometheus.CounterVec: | ||||
| 		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { | ||||
| 			return false | ||||
| 		} | ||||
| 	case prometheus.ObserverVec: | ||||
| 		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { | ||||
| 			return false | ||||
| 		} | ||||
| 	default: | ||||
| 		panic("unsupported metric vec type") | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // emptyLabels is a one-time allocation for non-partitioned metrics to avoid | ||||
| // unnecessary allocations on each request. | ||||
| var emptyLabels = prometheus.Labels{} | ||||
|  | ||||
| func labels(code, method bool, reqMethod string, status int) prometheus.Labels { | ||||
| 	if !(code || method) { | ||||
| 		return emptyLabels | ||||
| 	} | ||||
| 	labels := prometheus.Labels{} | ||||
|  | ||||
| 	if code { | ||||
| 		labels["code"] = sanitizeCode(status) | ||||
| 	} | ||||
| 	if method { | ||||
| 		labels["method"] = sanitizeMethod(reqMethod) | ||||
| 	} | ||||
|  | ||||
| 	return labels | ||||
| } | ||||
|  | ||||
| func computeApproximateRequestSize(r *http.Request) int { | ||||
| 	s := 0 | ||||
| 	if r.URL != nil { | ||||
| 		s += len(r.URL.String()) | ||||
| 	} | ||||
|  | ||||
| 	s += len(r.Method) | ||||
| 	s += len(r.Proto) | ||||
| 	for name, values := range r.Header { | ||||
| 		s += len(name) | ||||
| 		for _, value := range values { | ||||
| 			s += len(value) | ||||
| 		} | ||||
| 	} | ||||
| 	s += len(r.Host) | ||||
|  | ||||
| 	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. | ||||
|  | ||||
| 	if r.ContentLength != -1 { | ||||
| 		s += int(r.ContentLength) | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| func sanitizeMethod(m string) string { | ||||
| 	switch m { | ||||
| 	case "GET", "get": | ||||
| 		return "get" | ||||
| 	case "PUT", "put": | ||||
| 		return "put" | ||||
| 	case "HEAD", "head": | ||||
| 		return "head" | ||||
| 	case "POST", "post": | ||||
| 		return "post" | ||||
| 	case "DELETE", "delete": | ||||
| 		return "delete" | ||||
| 	case "CONNECT", "connect": | ||||
| 		return "connect" | ||||
| 	case "OPTIONS", "options": | ||||
| 		return "options" | ||||
| 	case "NOTIFY", "notify": | ||||
| 		return "notify" | ||||
| 	default: | ||||
| 		return strings.ToLower(m) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // If the wrapped http.Handler has not set a status code, i.e. the value is | ||||
| // currently 0, sanitizeCode will return 200, for consistency with the | ||||
| // behavior of the stdlib. | ||||
| func sanitizeCode(s int) string { | ||||
| 	switch s { | ||||
| 	case 100: | ||||
| 		return "100" | ||||
| 	case 101: | ||||
| 		return "101" | ||||
|  | ||||
| 	case 200, 0: | ||||
| 		return "200" | ||||
| 	case 201: | ||||
| 		return "201" | ||||
| 	case 202: | ||||
| 		return "202" | ||||
| 	case 203: | ||||
| 		return "203" | ||||
| 	case 204: | ||||
| 		return "204" | ||||
| 	case 205: | ||||
| 		return "205" | ||||
| 	case 206: | ||||
| 		return "206" | ||||
|  | ||||
| 	case 300: | ||||
| 		return "300" | ||||
| 	case 301: | ||||
| 		return "301" | ||||
| 	case 302: | ||||
| 		return "302" | ||||
| 	case 304: | ||||
| 		return "304" | ||||
| 	case 305: | ||||
| 		return "305" | ||||
| 	case 307: | ||||
| 		return "307" | ||||
|  | ||||
| 	case 400: | ||||
| 		return "400" | ||||
| 	case 401: | ||||
| 		return "401" | ||||
| 	case 402: | ||||
| 		return "402" | ||||
| 	case 403: | ||||
| 		return "403" | ||||
| 	case 404: | ||||
| 		return "404" | ||||
| 	case 405: | ||||
| 		return "405" | ||||
| 	case 406: | ||||
| 		return "406" | ||||
| 	case 407: | ||||
| 		return "407" | ||||
| 	case 408: | ||||
| 		return "408" | ||||
| 	case 409: | ||||
| 		return "409" | ||||
| 	case 410: | ||||
| 		return "410" | ||||
| 	case 411: | ||||
| 		return "411" | ||||
| 	case 412: | ||||
| 		return "412" | ||||
| 	case 413: | ||||
| 		return "413" | ||||
| 	case 414: | ||||
| 		return "414" | ||||
| 	case 415: | ||||
| 		return "415" | ||||
| 	case 416: | ||||
| 		return "416" | ||||
| 	case 417: | ||||
| 		return "417" | ||||
| 	case 418: | ||||
| 		return "418" | ||||
|  | ||||
| 	case 500: | ||||
| 		return "500" | ||||
| 	case 501: | ||||
| 		return "501" | ||||
| 	case 502: | ||||
| 		return "502" | ||||
| 	case 503: | ||||
| 		return "503" | ||||
| 	case 504: | ||||
| 		return "504" | ||||
| 	case 505: | ||||
| 		return "505" | ||||
|  | ||||
| 	case 428: | ||||
| 		return "428" | ||||
| 	case 429: | ||||
| 		return "429" | ||||
| 	case 431: | ||||
| 		return "431" | ||||
| 	case 511: | ||||
| 		return "511" | ||||
|  | ||||
| 	default: | ||||
| 		return strconv.Itoa(s) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
950	vendor/github.com/prometheus/client_golang/prometheus/registry.go (generated, vendored)
| @ -1,950 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"github.com/cespare/xxhash/v2" | ||||
| 	//lint:ignore SA1019 Need to keep deprecated package for compatibility. | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/prometheus/common/expfmt" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus/internal" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// Capacity for the channel to collect metrics and descriptors. | ||||
| 	capMetricChan = 1000 | ||||
| 	capDescChan   = 10 | ||||
| ) | ||||
|  | ||||
| // DefaultRegisterer and DefaultGatherer are the implementations of the | ||||
| // Registerer and Gatherer interfaces that a number of convenience functions | ||||
| // in this package act on. Initially, both variables point to the same | ||||
| // Registry, which has a process collector (currently on Linux only, see | ||||
| // NewProcessCollector) and a Go collector (see NewGoCollector, in particular | ||||
| // the note about the stop-the-world implication with Go versions older than | ||||
| // 1.9) already registered. This approach of keeping default instances as | ||||
| // global state mirrors the approach of other packages in the Go standard | ||||
| // library. Note that there are caveats. Change the variables with caution and | ||||
| // only if you understand the consequences. Users who want to avoid global | ||||
| // state altogether should not use the convenience functions and act on custom | ||||
| // instances instead. | ||||
| var ( | ||||
| 	defaultRegistry              = NewRegistry() | ||||
| 	DefaultRegisterer Registerer = defaultRegistry | ||||
| 	DefaultGatherer   Gatherer   = defaultRegistry | ||||
| ) | ||||
|  | ||||
| func init() { | ||||
| 	MustRegister(NewProcessCollector(ProcessCollectorOpts{})) | ||||
| 	MustRegister(NewGoCollector()) | ||||
| } | ||||
|  | ||||
| // NewRegistry creates a new vanilla Registry without any Collectors | ||||
| // pre-registered. | ||||
| func NewRegistry() *Registry { | ||||
| 	return &Registry{ | ||||
| 		collectorsByID:  map[uint64]Collector{}, | ||||
| 		descIDs:         map[uint64]struct{}{}, | ||||
| 		dimHashesByName: map[string]uint64{}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewPedanticRegistry returns a registry that checks during collection if each | ||||
| // collected Metric is consistent with its reported Desc, and if the Desc has | ||||
| // actually been registered with the registry. Unchecked Collectors (those whose | ||||
| // Describe method does not yield any descriptors) are excluded from the check. | ||||
| // | ||||
| // Usually, a Registry will be happy as long as the union of all collected | ||||
| // Metrics is consistent and valid even if some metrics are not consistent with | ||||
| // their own Desc or a Desc provided by their registered Collector. Well-behaved | ||||
| // Collectors and Metrics will only provide consistent Descs. This Registry is | ||||
| // useful to test the implementation of Collectors and Metrics. | ||||
| func NewPedanticRegistry() *Registry { | ||||
| 	r := NewRegistry() | ||||
| 	r.pedanticChecksEnabled = true | ||||
| 	return r | ||||
| } | ||||
|  | ||||
| // Registerer is the interface for the part of a registry in charge of | ||||
| // registering and unregistering. Users of custom registries should use | ||||
| // Registerer as type for registration purposes (rather than the Registry type | ||||
| // directly). In that way, they are free to use custom Registerer implementation | ||||
| // (e.g. for testing purposes). | ||||
| type Registerer interface { | ||||
| 	// Register registers a new Collector to be included in metrics | ||||
| 	// collection. It returns an error if the descriptors provided by the | ||||
| 	// Collector are invalid or if they — in combination with descriptors of | ||||
| 	// already registered Collectors — do not fulfill the consistency and | ||||
| 	// uniqueness criteria described in the documentation of metric.Desc. | ||||
| 	// | ||||
| 	// If the provided Collector is equal to a Collector already registered | ||||
| 	// (which includes the case of re-registering the same Collector), the | ||||
| 	// returned error is an instance of AlreadyRegisteredError, which | ||||
| 	// contains the previously registered Collector. | ||||
| 	// | ||||
| 	// A Collector whose Describe method does not yield any Desc is treated | ||||
| 	// as unchecked. Registration will always succeed. No check for | ||||
| 	// re-registering (see previous paragraph) is performed. Thus, the | ||||
| 	// caller is responsible for not double-registering the same unchecked | ||||
| 	// Collector, and for providing a Collector that will not cause | ||||
| 	// inconsistent metrics on collection. (This would lead to scrape | ||||
| 	// errors.) | ||||
| 	Register(Collector) error | ||||
| 	// MustRegister works like Register but registers any number of | ||||
| 	// Collectors and panics upon the first registration that causes an | ||||
| 	// error. | ||||
| 	MustRegister(...Collector) | ||||
| 	// Unregister unregisters the Collector that equals the Collector passed | ||||
| 	// in as an argument.  (Two Collectors are considered equal if their | ||||
| 	// Describe method yields the same set of descriptors.) The function | ||||
| 	// returns whether a Collector was unregistered. Note that an unchecked | ||||
| 	// Collector cannot be unregistered (as its Describe method does not | ||||
| 	// yield any descriptor). | ||||
| 	// | ||||
| 	// Note that even after unregistering, it will not be possible to | ||||
| 	// register a new Collector that is inconsistent with the unregistered | ||||
| 	// Collector, e.g. a Collector collecting metrics with the same name but | ||||
| 	// a different help string. The rationale here is that the same registry | ||||
| 	// instance must only collect consistent metrics throughout its | ||||
| 	// lifetime. | ||||
| 	Unregister(Collector) bool | ||||
| } | ||||
|  | ||||
| // Gatherer is the interface for the part of a registry in charge of gathering | ||||
| // the collected metrics into a number of MetricFamilies. The Gatherer interface | ||||
| // comes with the same general implication as described for the Registerer | ||||
| // interface. | ||||
| type Gatherer interface { | ||||
| 	// Gather calls the Collect method of the registered Collectors and then | ||||
| 	// gathers the collected metrics into a lexicographically sorted slice | ||||
| 	// of uniquely named MetricFamily protobufs. Gather ensures that the | ||||
| 	// returned slice is valid and self-consistent so that it can be used | ||||
| 	// for valid exposition. As an exception to the strict consistency | ||||
| 	// requirements described for metric.Desc, Gather will tolerate | ||||
| 	// different sets of label names for metrics of the same metric family. | ||||
| 	// | ||||
| 	// Even if an error occurs, Gather attempts to gather as many metrics as | ||||
| 	// possible. Hence, if a non-nil error is returned, the returned | ||||
| 	// MetricFamily slice could be nil (in case of a fatal error that | ||||
| 	// prevented any meaningful metric collection) or contain a number of | ||||
| 	// MetricFamily protobufs, some of which might be incomplete, and some | ||||
| 	// might be missing altogether. The returned error (which might be a | ||||
| 	// MultiError) explains the details. Note that this is mostly useful for | ||||
| 	// debugging purposes. If the gathered protobufs are to be used for | ||||
| 	// exposition in actual monitoring, it is almost always better to not | ||||
| 	// expose an incomplete result and instead disregard the returned | ||||
| 	// MetricFamily protobufs in case the returned error is non-nil. | ||||
| 	Gather() ([]*dto.MetricFamily, error) | ||||
| } | ||||
|  | ||||
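| // Editorial sketch, not part of the vendored upstream file: because Registerer | ||||
| // and Gatherer are interfaces, a program can avoid the global default registry | ||||
| // entirely and act on its own instance, as suggested above. The collector | ||||
| // choice and the port are illustrative assumptions. | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"log" | ||||
| 	"net/http" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus" | ||||
| 	"github.com/prometheus/client_golang/prometheus/promhttp" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
| 	// A dedicated Registry instead of DefaultRegisterer/DefaultGatherer. | ||||
| 	reg := prometheus.NewRegistry() | ||||
| 	reg.MustRegister( | ||||
| 		prometheus.NewGoCollector(), | ||||
| 		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), | ||||
| 	) | ||||
|  | ||||
| 	// Expose only what this registry gathers. | ||||
| 	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) | ||||
| 	log.Fatal(http.ListenAndServe(":8080", nil)) | ||||
| } | ||||
|  | ||||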
| // Register registers the provided Collector with the DefaultRegisterer. | ||||
| // | ||||
| // Register is a shortcut for DefaultRegisterer.Register(c). See there for more | ||||
| // details. | ||||
| func Register(c Collector) error { | ||||
| 	return DefaultRegisterer.Register(c) | ||||
| } | ||||
|  | ||||
| // MustRegister registers the provided Collectors with the DefaultRegisterer and | ||||
| // panics if any error occurs. | ||||
| // | ||||
| // MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See | ||||
| // there for more details. | ||||
| func MustRegister(cs ...Collector) { | ||||
| 	DefaultRegisterer.MustRegister(cs...) | ||||
| } | ||||
|  | ||||
| // Unregister removes the registration of the provided Collector from the | ||||
| // DefaultRegisterer. | ||||
| // | ||||
| // Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for | ||||
| // more details. | ||||
| func Unregister(c Collector) bool { | ||||
| 	return DefaultRegisterer.Unregister(c) | ||||
| } | ||||
|  | ||||
| // GathererFunc turns a function into a Gatherer. | ||||
| type GathererFunc func() ([]*dto.MetricFamily, error) | ||||
|  | ||||
| // Gather implements Gatherer. | ||||
| func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { | ||||
| 	return gf() | ||||
| } | ||||
|  | ||||
| // AlreadyRegisteredError is returned by the Register method if the Collector to | ||||
| // be registered has already been registered before, or a different Collector | ||||
| // that collects the same metrics has been registered before. Registration fails | ||||
| // in that case, but you can detect from the kind of error what has | ||||
| // happened. The error contains fields for the existing Collector and the | ||||
| // (rejected) new Collector that equals the existing one. This can be used to | ||||
| // find out if an equal Collector has been registered before and switch over to | ||||
| // using the old one, as demonstrated in the example. | ||||
| type AlreadyRegisteredError struct { | ||||
| 	ExistingCollector, NewCollector Collector | ||||
| } | ||||
|  | ||||
| func (err AlreadyRegisteredError) Error() string { | ||||
| 	return "duplicate metrics collector registration attempted" | ||||
| } | ||||
|  | ||||
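| // Editorial sketch, not part of the vendored upstream file: the "switch over | ||||
| // to the existing Collector" pattern described above. The helper name and the | ||||
| // counter are hypothetical. | ||||
| package main | ||||
|  | ||||
| import "github.com/prometheus/client_golang/prometheus" | ||||
|  | ||||
| // registerOrReuse registers c and, if an equal Collector was registered | ||||
| // before, returns that existing Collector instead of failing. | ||||
| func registerOrReuse(reg prometheus.Registerer, c prometheus.Collector) prometheus.Collector { | ||||
| 	if err := reg.Register(c); err != nil { | ||||
| 		if are, ok := err.(prometheus.AlreadyRegisteredError); ok { | ||||
| 			return are.ExistingCollector | ||||
| 		} | ||||
| 		panic(err) // Any other registration error is a programming error here. | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| func main() { | ||||
| 	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "jobs_total", Help: "Jobs seen."}) | ||||
| 	// If an equal Counter was registered before, we transparently reuse it. | ||||
| 	jobs := registerOrReuse(prometheus.DefaultRegisterer, c).(prometheus.Counter) | ||||
| 	jobs.Inc() | ||||
| } | ||||
|  | ||||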
| // MultiError is a slice of errors implementing the error interface. It is used | ||||
| // by a Gatherer to report multiple errors during MetricFamily gathering. | ||||
| type MultiError []error | ||||
|  | ||||
| // Error formats the contained errors as a bullet point list, preceded by the | ||||
| // total number of errors. Note that this results in a multi-line string. | ||||
| func (errs MultiError) Error() string { | ||||
| 	if len(errs) == 0 { | ||||
| 		return "" | ||||
| 	} | ||||
| 	buf := &bytes.Buffer{} | ||||
| 	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) | ||||
| 	for _, err := range errs { | ||||
| 		fmt.Fprintf(buf, "\n* %s", err) | ||||
| 	} | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| // Append appends the provided error if it is not nil. | ||||
| func (errs *MultiError) Append(err error) { | ||||
| 	if err != nil { | ||||
| 		*errs = append(*errs, err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only | ||||
| // contained error as error if len(errs) is 1. In all other cases, it returns | ||||
| // the MultiError directly. This is helpful for returning a MultiError in a way | ||||
| // that only uses the MultiError if needed. | ||||
| func (errs MultiError) MaybeUnwrap() error { | ||||
| 	switch len(errs) { | ||||
| 	case 0: | ||||
| 		return nil | ||||
| 	case 1: | ||||
| 		return errs[0] | ||||
| 	default: | ||||
| 		return errs | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Registry registers Prometheus collectors, collects their metrics, and gathers | ||||
| // them into MetricFamilies for exposition. It implements both Registerer and | ||||
| // Gatherer. The zero value is not usable. Create instances with NewRegistry or | ||||
| // NewPedanticRegistry. | ||||
| type Registry struct { | ||||
| 	mtx                   sync.RWMutex | ||||
| 	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs. | ||||
| 	descIDs               map[uint64]struct{} | ||||
| 	dimHashesByName       map[string]uint64 | ||||
| 	uncheckedCollectors   []Collector | ||||
| 	pedanticChecksEnabled bool | ||||
| } | ||||
|  | ||||
| // Register implements Registerer. | ||||
| func (r *Registry) Register(c Collector) error { | ||||
| 	var ( | ||||
| 		descChan           = make(chan *Desc, capDescChan) | ||||
| 		newDescIDs         = map[uint64]struct{}{} | ||||
| 		newDimHashesByName = map[string]uint64{} | ||||
| 		collectorID        uint64 // All desc IDs XOR'd together. | ||||
| 		duplicateDescErr   error | ||||
| 	) | ||||
| 	go func() { | ||||
| 		c.Describe(descChan) | ||||
| 		close(descChan) | ||||
| 	}() | ||||
| 	r.mtx.Lock() | ||||
| 	defer func() { | ||||
| 		// Drain channel in case of premature return to not leak a goroutine. | ||||
| 		for range descChan { | ||||
| 		} | ||||
| 		r.mtx.Unlock() | ||||
| 	}() | ||||
| 	// Conduct various tests... | ||||
| 	for desc := range descChan { | ||||
|  | ||||
| 		// Is the descriptor valid at all? | ||||
| 		if desc.err != nil { | ||||
| 			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) | ||||
| 		} | ||||
|  | ||||
| 		// Is the descID unique? | ||||
| 		// (In other words: Is the fqName + constLabel combination unique?) | ||||
| 		if _, exists := r.descIDs[desc.id]; exists { | ||||
| 			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) | ||||
| 		} | ||||
| 		// If it is not a duplicate desc in this collector, XOR it to | ||||
| 		// the collectorID.  (We allow duplicate descs within the same | ||||
| 		// collector, but their existence must be a no-op.) | ||||
| 		if _, exists := newDescIDs[desc.id]; !exists { | ||||
| 			newDescIDs[desc.id] = struct{}{} | ||||
| 			collectorID ^= desc.id | ||||
| 		} | ||||
|  | ||||
| 		// Are all the label names and the help string consistent with | ||||
| 		// previous descriptors of the same name? | ||||
| 		// First check existing descriptors... | ||||
| 		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { | ||||
| 			if dimHash != desc.dimHash { | ||||
| 				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) | ||||
| 			} | ||||
| 		} else { | ||||
| 			// ...then check the new descriptors already seen. | ||||
| 			if dimHash, exists := newDimHashesByName[desc.fqName]; exists { | ||||
| 				if dimHash != desc.dimHash { | ||||
| 					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) | ||||
| 				} | ||||
| 			} else { | ||||
| 				newDimHashesByName[desc.fqName] = desc.dimHash | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	// A Collector yielding no Desc at all is considered unchecked. | ||||
| 	if len(newDescIDs) == 0 { | ||||
| 		r.uncheckedCollectors = append(r.uncheckedCollectors, c) | ||||
| 		return nil | ||||
| 	} | ||||
| 	if existing, exists := r.collectorsByID[collectorID]; exists { | ||||
| 		switch e := existing.(type) { | ||||
| 		case *wrappingCollector: | ||||
| 			return AlreadyRegisteredError{ | ||||
| 				ExistingCollector: e.unwrapRecursively(), | ||||
| 				NewCollector:      c, | ||||
| 			} | ||||
| 		default: | ||||
| 			return AlreadyRegisteredError{ | ||||
| 				ExistingCollector: e, | ||||
| 				NewCollector:      c, | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	// If the collectorID is new, but at least one of the descs existed | ||||
| 	// before, we are in trouble. | ||||
| 	if duplicateDescErr != nil { | ||||
| 		return duplicateDescErr | ||||
| 	} | ||||
|  | ||||
| 	// Only after all tests have passed, actually register. | ||||
| 	r.collectorsByID[collectorID] = c | ||||
| 	for hash := range newDescIDs { | ||||
| 		r.descIDs[hash] = struct{}{} | ||||
| 	} | ||||
| 	for name, dimHash := range newDimHashesByName { | ||||
| 		r.dimHashesByName[name] = dimHash | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Unregister implements Registerer. | ||||
| func (r *Registry) Unregister(c Collector) bool { | ||||
| 	var ( | ||||
| 		descChan    = make(chan *Desc, capDescChan) | ||||
| 		descIDs     = map[uint64]struct{}{} | ||||
| 		collectorID uint64 // All desc IDs XOR'd together. | ||||
| 	) | ||||
| 	go func() { | ||||
| 		c.Describe(descChan) | ||||
| 		close(descChan) | ||||
| 	}() | ||||
| 	for desc := range descChan { | ||||
| 		if _, exists := descIDs[desc.id]; !exists { | ||||
| 			collectorID ^= desc.id | ||||
| 			descIDs[desc.id] = struct{}{} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	r.mtx.RLock() | ||||
| 	if _, exists := r.collectorsByID[collectorID]; !exists { | ||||
| 		r.mtx.RUnlock() | ||||
| 		return false | ||||
| 	} | ||||
| 	r.mtx.RUnlock() | ||||
|  | ||||
| 	r.mtx.Lock() | ||||
| 	defer r.mtx.Unlock() | ||||
|  | ||||
| 	delete(r.collectorsByID, collectorID) | ||||
| 	for id := range descIDs { | ||||
| 		delete(r.descIDs, id) | ||||
| 	} | ||||
| 	// dimHashesByName is left untouched as those must be consistent | ||||
| 	// throughout the lifetime of a program. | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // MustRegister implements Registerer. | ||||
| func (r *Registry) MustRegister(cs ...Collector) { | ||||
| 	for _, c := range cs { | ||||
| 		if err := r.Register(c); err != nil { | ||||
| 			panic(err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Gather implements Gatherer. | ||||
| func (r *Registry) Gather() ([]*dto.MetricFamily, error) { | ||||
| 	var ( | ||||
| 		checkedMetricChan   = make(chan Metric, capMetricChan) | ||||
| 		uncheckedMetricChan = make(chan Metric, capMetricChan) | ||||
| 		metricHashes        = map[uint64]struct{}{} | ||||
| 		wg                  sync.WaitGroup | ||||
| 		errs                MultiError          // The collected errors to return in the end. | ||||
| 		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks | ||||
| 	) | ||||
|  | ||||
| 	r.mtx.RLock() | ||||
| 	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) | ||||
| 	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) | ||||
| 	checkedCollectors := make(chan Collector, len(r.collectorsByID)) | ||||
| 	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) | ||||
| 	for _, collector := range r.collectorsByID { | ||||
| 		checkedCollectors <- collector | ||||
| 	} | ||||
| 	for _, collector := range r.uncheckedCollectors { | ||||
| 		uncheckedCollectors <- collector | ||||
| 	} | ||||
| 	// In case pedantic checks are enabled, we have to copy the map before | ||||
| 	// giving up the RLock. | ||||
| 	if r.pedanticChecksEnabled { | ||||
| 		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) | ||||
| 		for id := range r.descIDs { | ||||
| 			registeredDescIDs[id] = struct{}{} | ||||
| 		} | ||||
| 	} | ||||
| 	r.mtx.RUnlock() | ||||
|  | ||||
| 	wg.Add(goroutineBudget) | ||||
|  | ||||
| 	collectWorker := func() { | ||||
| 		for { | ||||
| 			select { | ||||
| 			case collector := <-checkedCollectors: | ||||
| 				collector.Collect(checkedMetricChan) | ||||
| 			case collector := <-uncheckedCollectors: | ||||
| 				collector.Collect(uncheckedMetricChan) | ||||
| 			default: | ||||
| 				return | ||||
| 			} | ||||
| 			wg.Done() | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Start the first worker now to make sure at least one is running. | ||||
| 	go collectWorker() | ||||
| 	goroutineBudget-- | ||||
|  | ||||
| 	// Close checkedMetricChan and uncheckedMetricChan once all collectors | ||||
| 	// are collected. | ||||
| 	go func() { | ||||
| 		wg.Wait() | ||||
| 		close(checkedMetricChan) | ||||
| 		close(uncheckedMetricChan) | ||||
| 	}() | ||||
|  | ||||
| 	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return. | ||||
| 	defer func() { | ||||
| 		if checkedMetricChan != nil { | ||||
| 			for range checkedMetricChan { | ||||
| 			} | ||||
| 		} | ||||
| 		if uncheckedMetricChan != nil { | ||||
| 			for range uncheckedMetricChan { | ||||
| 			} | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	// Copy the channel references so we can nil them out later to remove | ||||
| 	// them from the select statements below. | ||||
| 	cmc := checkedMetricChan | ||||
| 	umc := uncheckedMetricChan | ||||
|  | ||||
| 	for { | ||||
| 		select { | ||||
| 		case metric, ok := <-cmc: | ||||
| 			if !ok { | ||||
| 				cmc = nil | ||||
| 				break | ||||
| 			} | ||||
| 			errs.Append(processMetric( | ||||
| 				metric, metricFamiliesByName, | ||||
| 				metricHashes, | ||||
| 				registeredDescIDs, | ||||
| 			)) | ||||
| 		case metric, ok := <-umc: | ||||
| 			if !ok { | ||||
| 				umc = nil | ||||
| 				break | ||||
| 			} | ||||
| 			errs.Append(processMetric( | ||||
| 				metric, metricFamiliesByName, | ||||
| 				metricHashes, | ||||
| 				nil, | ||||
| 			)) | ||||
| 		default: | ||||
| 			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { | ||||
| 				// All collectors are already being worked on, or | ||||
| 				// we have already started as many goroutines as | ||||
| 				// there are collectors. Do the same as above, | ||||
| 				// just without the default. | ||||
| 				select { | ||||
| 				case metric, ok := <-cmc: | ||||
| 					if !ok { | ||||
| 						cmc = nil | ||||
| 						break | ||||
| 					} | ||||
| 					errs.Append(processMetric( | ||||
| 						metric, metricFamiliesByName, | ||||
| 						metricHashes, | ||||
| 						registeredDescIDs, | ||||
| 					)) | ||||
| 				case metric, ok := <-umc: | ||||
| 					if !ok { | ||||
| 						umc = nil | ||||
| 						break | ||||
| 					} | ||||
| 					errs.Append(processMetric( | ||||
| 						metric, metricFamiliesByName, | ||||
| 						metricHashes, | ||||
| 						nil, | ||||
| 					)) | ||||
| 				} | ||||
| 				break | ||||
| 			} | ||||
| 			// Start more workers. | ||||
| 			go collectWorker() | ||||
| 			goroutineBudget-- | ||||
| 			runtime.Gosched() | ||||
| 		} | ||||
| 		// Once both checkedMetricChan and uncheckedMetricChan are closed | ||||
| 		// and drained, the contraption above will nil out cmc and umc, | ||||
| 		// and then we can leave the collect loop here. | ||||
| 		if cmc == nil && umc == nil { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() | ||||
| } | ||||
|  | ||||
| // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the | ||||
| // Prometheus text format, and writes it to a temporary file. Upon success, the | ||||
| // temporary file is renamed to the provided filename. | ||||
| // | ||||
| // This is intended for use with the textfile collector of the node exporter. | ||||
| // Note that the node exporter expects the filename to be suffixed with ".prom". | ||||
| func WriteToTextfile(filename string, g Gatherer) error { | ||||
| 	tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer os.Remove(tmp.Name()) | ||||
|  | ||||
| 	mfs, err := g.Gather() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	for _, mf := range mfs { | ||||
| 		if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	if err := tmp.Close(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if err := os.Chmod(tmp.Name(), 0644); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return os.Rename(tmp.Name(), filename) | ||||
| } | ||||
|  | ||||
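| // Editorial sketch, not part of the vendored upstream file: writing the | ||||
| // default gatherer's metrics to a file picked up by the node exporter's | ||||
| // textfile collector. The counter and the output path are illustrative | ||||
| // assumptions. | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"log" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
| 	completions := prometheus.NewCounter(prometheus.CounterOpts{ | ||||
| 		Name: "batch_job_completions_total", | ||||
| 		Help: "Completed runs of this batch job.", | ||||
| 	}) | ||||
| 	prometheus.MustRegister(completions) | ||||
| 	completions.Inc() | ||||
|  | ||||
| 	// The ".prom" suffix is what the textfile collector expects. | ||||
| 	if err := prometheus.WriteToTextfile("/var/lib/node_exporter/textfile/batch_job.prom", prometheus.DefaultGatherer); err != nil { | ||||
| 		log.Fatal(err) | ||||
| 	} | ||||
| } | ||||
|  | ||||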
| // processMetric is an internal helper method only used by the Gather method. | ||||
| func processMetric( | ||||
| 	metric Metric, | ||||
| 	metricFamiliesByName map[string]*dto.MetricFamily, | ||||
| 	metricHashes map[uint64]struct{}, | ||||
| 	registeredDescIDs map[uint64]struct{}, | ||||
| ) error { | ||||
| 	desc := metric.Desc() | ||||
| 	// Wrapped metrics collected by an unchecked Collector can have an | ||||
| 	// invalid Desc. | ||||
| 	if desc.err != nil { | ||||
| 		return desc.err | ||||
| 	} | ||||
| 	dtoMetric := &dto.Metric{} | ||||
| 	if err := metric.Write(dtoMetric); err != nil { | ||||
| 		return fmt.Errorf("error collecting metric %v: %s", desc, err) | ||||
| 	} | ||||
| 	metricFamily, ok := metricFamiliesByName[desc.fqName] | ||||
| 	if ok { // Existing name. | ||||
| 		if metricFamily.GetHelp() != desc.help { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected metric %s %s has help %q but should have %q", | ||||
| 				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), | ||||
| 			) | ||||
| 		} | ||||
| 		// TODO(beorn7): Simplify switch once Desc has type. | ||||
| 		switch metricFamily.GetType() { | ||||
| 		case dto.MetricType_COUNTER: | ||||
| 			if dtoMetric.Counter == nil { | ||||
| 				return fmt.Errorf( | ||||
| 					"collected metric %s %s should be a Counter", | ||||
| 					desc.fqName, dtoMetric, | ||||
| 				) | ||||
| 			} | ||||
| 		case dto.MetricType_GAUGE: | ||||
| 			if dtoMetric.Gauge == nil { | ||||
| 				return fmt.Errorf( | ||||
| 					"collected metric %s %s should be a Gauge", | ||||
| 					desc.fqName, dtoMetric, | ||||
| 				) | ||||
| 			} | ||||
| 		case dto.MetricType_SUMMARY: | ||||
| 			if dtoMetric.Summary == nil { | ||||
| 				return fmt.Errorf( | ||||
| 					"collected metric %s %s should be a Summary", | ||||
| 					desc.fqName, dtoMetric, | ||||
| 				) | ||||
| 			} | ||||
| 		case dto.MetricType_UNTYPED: | ||||
| 			if dtoMetric.Untyped == nil { | ||||
| 				return fmt.Errorf( | ||||
| 					"collected metric %s %s should be Untyped", | ||||
| 					desc.fqName, dtoMetric, | ||||
| 				) | ||||
| 			} | ||||
| 		case dto.MetricType_HISTOGRAM: | ||||
| 			if dtoMetric.Histogram == nil { | ||||
| 				return fmt.Errorf( | ||||
| 					"collected metric %s %s should be a Histogram", | ||||
| 					desc.fqName, dtoMetric, | ||||
| 				) | ||||
| 			} | ||||
| 		default: | ||||
| 			panic("encountered MetricFamily with invalid type") | ||||
| 		} | ||||
| 	} else { // New name. | ||||
| 		metricFamily = &dto.MetricFamily{} | ||||
| 		metricFamily.Name = proto.String(desc.fqName) | ||||
| 		metricFamily.Help = proto.String(desc.help) | ||||
| 		// TODO(beorn7): Simplify switch once Desc has type. | ||||
| 		switch { | ||||
| 		case dtoMetric.Gauge != nil: | ||||
| 			metricFamily.Type = dto.MetricType_GAUGE.Enum() | ||||
| 		case dtoMetric.Counter != nil: | ||||
| 			metricFamily.Type = dto.MetricType_COUNTER.Enum() | ||||
| 		case dtoMetric.Summary != nil: | ||||
| 			metricFamily.Type = dto.MetricType_SUMMARY.Enum() | ||||
| 		case dtoMetric.Untyped != nil: | ||||
| 			metricFamily.Type = dto.MetricType_UNTYPED.Enum() | ||||
| 		case dtoMetric.Histogram != nil: | ||||
| 			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() | ||||
| 		default: | ||||
| 			return fmt.Errorf("empty metric collected: %s", dtoMetric) | ||||
| 		} | ||||
| 		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		metricFamiliesByName[desc.fqName] = metricFamily | ||||
| 	} | ||||
| 	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if registeredDescIDs != nil { | ||||
| 		// Is the desc registered at all? | ||||
| 		if _, exist := registeredDescIDs[desc.id]; !exist { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected metric %s %s with unregistered descriptor %s", | ||||
| 				metricFamily.GetName(), dtoMetric, desc, | ||||
| 			) | ||||
| 		} | ||||
| 		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	metricFamily.Metric = append(metricFamily.Metric, dtoMetric) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Gatherers is a slice of Gatherer instances that implements the Gatherer | ||||
| // interface itself. Its Gather method calls Gather on all Gatherers in the | ||||
| // slice in order and returns the merged results. Errors returned from the | ||||
| // Gather calls are all returned in a flattened MultiError. Duplicate and | ||||
| // inconsistent Metrics are skipped (first occurrence in slice order wins) and | ||||
| // reported in the returned error. | ||||
| // | ||||
| // Gatherers can be used to merge the Gather results from multiple | ||||
| // Registries. It also provides a way to directly inject existing MetricFamily | ||||
| // protobufs into the gathering by creating a custom Gatherer with a Gather | ||||
| // method that simply returns the existing MetricFamily protobufs. Note that no | ||||
| // registration is involved (in contrast to Collector registration), so | ||||
| // obviously registration-time checks cannot happen. Any inconsistencies between | ||||
| // the gathered MetricFamilies are reported as errors by the Gather method, and | ||||
| // inconsistent Metrics are dropped. Invalid parts of the MetricFamilies | ||||
| // (e.g. syntactically invalid metric or label names) will go undetected. | ||||
| type Gatherers []Gatherer | ||||
|  | ||||
| // Gather implements Gatherer. | ||||
| func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { | ||||
| 	var ( | ||||
| 		metricFamiliesByName = map[string]*dto.MetricFamily{} | ||||
| 		metricHashes         = map[uint64]struct{}{} | ||||
| 		errs                 MultiError // The collected errors to return in the end. | ||||
| 	) | ||||
|  | ||||
| 	for i, g := range gs { | ||||
| 		mfs, err := g.Gather() | ||||
| 		if err != nil { | ||||
| 			if multiErr, ok := err.(MultiError); ok { | ||||
| 				for _, err := range multiErr { | ||||
| 					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) | ||||
| 				} | ||||
| 			} else { | ||||
| 				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) | ||||
| 			} | ||||
| 		} | ||||
| 		for _, mf := range mfs { | ||||
| 			existingMF, exists := metricFamiliesByName[mf.GetName()] | ||||
| 			if exists { | ||||
| 				if existingMF.GetHelp() != mf.GetHelp() { | ||||
| 					errs = append(errs, fmt.Errorf( | ||||
| 						"gathered metric family %s has help %q but should have %q", | ||||
| 						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), | ||||
| 					)) | ||||
| 					continue | ||||
| 				} | ||||
| 				if existingMF.GetType() != mf.GetType() { | ||||
| 					errs = append(errs, fmt.Errorf( | ||||
| 						"gathered metric family %s has type %s but should have %s", | ||||
| 						mf.GetName(), mf.GetType(), existingMF.GetType(), | ||||
| 					)) | ||||
| 					continue | ||||
| 				} | ||||
| 			} else { | ||||
| 				existingMF = &dto.MetricFamily{} | ||||
| 				existingMF.Name = mf.Name | ||||
| 				existingMF.Help = mf.Help | ||||
| 				existingMF.Type = mf.Type | ||||
| 				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { | ||||
| 					errs = append(errs, err) | ||||
| 					continue | ||||
| 				} | ||||
| 				metricFamiliesByName[mf.GetName()] = existingMF | ||||
| 			} | ||||
| 			for _, m := range mf.Metric { | ||||
| 				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { | ||||
| 					errs = append(errs, err) | ||||
| 					continue | ||||
| 				} | ||||
| 				existingMF.Metric = append(existingMF.Metric, m) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() | ||||
| } | ||||
|  | ||||
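| // Editorial sketch, not part of the vendored upstream file: merging the | ||||
| // default gatherer with a second registry and exposing the combined result, | ||||
| // as described above. The gauge name and the port are illustrative | ||||
| // assumptions. | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"log" | ||||
| 	"net/http" | ||||
|  | ||||
| 	"github.com/prometheus/client_golang/prometheus" | ||||
| 	"github.com/prometheus/client_golang/prometheus/promhttp" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
| 	extra := prometheus.NewRegistry() | ||||
| 	extra.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{ | ||||
| 		Name: "build_info_example", | ||||
| 		Help: "A metric registered outside the default registry.", | ||||
| 	})) | ||||
|  | ||||
| 	// Gatherers merges the results; duplicate or inconsistent metrics are | ||||
| 	// skipped (first occurrence wins) and reported in the returned error. | ||||
| 	merged := prometheus.Gatherers{prometheus.DefaultGatherer, extra} | ||||
| 	http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{})) | ||||
| 	log.Fatal(http.ListenAndServe(":8080", nil)) | ||||
| } | ||||
|  | ||||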
| // checkSuffixCollisions checks for collisions with the “magic” suffixes the | ||||
| // Prometheus text format and the internal metric representation of the | ||||
| // Prometheus server add while flattening Summaries and Histograms. | ||||
| func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { | ||||
| 	var ( | ||||
| 		newName              = mf.GetName() | ||||
| 		newType              = mf.GetType() | ||||
| 		newNameWithoutSuffix = "" | ||||
| 	) | ||||
| 	switch { | ||||
| 	case strings.HasSuffix(newName, "_count"): | ||||
| 		newNameWithoutSuffix = newName[:len(newName)-6] | ||||
| 	case strings.HasSuffix(newName, "_sum"): | ||||
| 		newNameWithoutSuffix = newName[:len(newName)-4] | ||||
| 	case strings.HasSuffix(newName, "_bucket"): | ||||
| 		newNameWithoutSuffix = newName[:len(newName)-7] | ||||
| 	} | ||||
| 	if newNameWithoutSuffix != "" { | ||||
| 		if existingMF, ok := mfs[newNameWithoutSuffix]; ok { | ||||
| 			switch existingMF.GetType() { | ||||
| 			case dto.MetricType_SUMMARY: | ||||
| 				if !strings.HasSuffix(newName, "_bucket") { | ||||
| 					return fmt.Errorf( | ||||
| 						"collected metric named %q collides with previously collected summary named %q", | ||||
| 						newName, newNameWithoutSuffix, | ||||
| 					) | ||||
| 				} | ||||
| 			case dto.MetricType_HISTOGRAM: | ||||
| 				return fmt.Errorf( | ||||
| 					"collected metric named %q collides with previously collected histogram named %q", | ||||
| 					newName, newNameWithoutSuffix, | ||||
| 				) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { | ||||
| 		if _, ok := mfs[newName+"_count"]; ok { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected histogram or summary named %q collides with previously collected metric named %q", | ||||
| 				newName, newName+"_count", | ||||
| 			) | ||||
| 		} | ||||
| 		if _, ok := mfs[newName+"_sum"]; ok { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected histogram or summary named %q collides with previously collected metric named %q", | ||||
| 				newName, newName+"_sum", | ||||
| 			) | ||||
| 		} | ||||
| 	} | ||||
| 	if newType == dto.MetricType_HISTOGRAM { | ||||
| 		if _, ok := mfs[newName+"_bucket"]; ok { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected histogram named %q collides with previously collected metric named %q", | ||||
| 				newName, newName+"_bucket", | ||||
| 			) | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // checkMetricConsistency checks if the provided Metric is consistent with the | ||||
| // provided MetricFamily. It also hashes the Metric labels and the MetricFamily | ||||
| // name. If the resulting hash is already in the provided metricHashes, an error | ||||
| // is returned. If not, it is added to metricHashes. | ||||
| func checkMetricConsistency( | ||||
| 	metricFamily *dto.MetricFamily, | ||||
| 	dtoMetric *dto.Metric, | ||||
| 	metricHashes map[uint64]struct{}, | ||||
| ) error { | ||||
| 	name := metricFamily.GetName() | ||||
|  | ||||
| 	// Type consistency with metric family. | ||||
| 	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || | ||||
| 		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || | ||||
| 		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || | ||||
| 		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || | ||||
| 		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { | ||||
| 		return fmt.Errorf( | ||||
| 			"collected metric %q { %s} is not a %s", | ||||
| 			name, dtoMetric, metricFamily.GetType(), | ||||
| 		) | ||||
| 	} | ||||
|  | ||||
| 	previousLabelName := "" | ||||
| 	for _, labelPair := range dtoMetric.GetLabel() { | ||||
| 		labelName := labelPair.GetName() | ||||
| 		if labelName == previousLabelName { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected metric %q { %s} has two or more labels with the same name: %s", | ||||
| 				name, dtoMetric, labelName, | ||||
| 			) | ||||
| 		} | ||||
| 		if !checkLabelName(labelName) { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected metric %q { %s} has a label with an invalid name: %s", | ||||
| 				name, dtoMetric, labelName, | ||||
| 			) | ||||
| 		} | ||||
| 		if dtoMetric.Summary != nil && labelName == quantileLabel { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected metric %q { %s} must not have an explicit %q label", | ||||
| 				name, dtoMetric, quantileLabel, | ||||
| 			) | ||||
| 		} | ||||
| 		if !utf8.ValidString(labelPair.GetValue()) { | ||||
| 			return fmt.Errorf( | ||||
| 				"collected metric %q { %s} has a label named %q whose value is not utf8: %#v", | ||||
| 				name, dtoMetric, labelName, labelPair.GetValue()) | ||||
| 		} | ||||
| 		previousLabelName = labelName | ||||
| 	} | ||||
|  | ||||
| 	// Is the metric unique (i.e. no other metric with the same name and the same labels)? | ||||
| 	h := xxhash.New() | ||||
| 	h.WriteString(name) | ||||
| 	h.Write(separatorByteSlice) | ||||
| 	// Make sure label pairs are sorted. We depend on it for the consistency | ||||
| 	// check. | ||||
| 	if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { | ||||
| 		// We cannot sort dtoMetric.Label in place as it is immutable by contract. | ||||
| 		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) | ||||
| 		copy(copiedLabels, dtoMetric.Label) | ||||
| 		sort.Sort(labelPairSorter(copiedLabels)) | ||||
| 		dtoMetric.Label = copiedLabels | ||||
| 	} | ||||
| 	for _, lp := range dtoMetric.Label { | ||||
| 		h.WriteString(lp.GetName()) | ||||
| 		h.Write(separatorByteSlice) | ||||
| 		h.WriteString(lp.GetValue()) | ||||
| 		h.Write(separatorByteSlice) | ||||
| 	} | ||||
| 	hSum := h.Sum64() | ||||
| 	if _, exists := metricHashes[hSum]; exists { | ||||
| 		return fmt.Errorf( | ||||
| 			"collected metric %q { %s} was collected before with the same name and label values", | ||||
| 			name, dtoMetric, | ||||
| 		) | ||||
| 	} | ||||
| 	metricHashes[hSum] = struct{}{} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func checkDescConsistency( | ||||
| 	metricFamily *dto.MetricFamily, | ||||
| 	dtoMetric *dto.Metric, | ||||
| 	desc *Desc, | ||||
| ) error { | ||||
| 	// Desc help consistency with metric family help. | ||||
| 	if metricFamily.GetHelp() != desc.help { | ||||
| 		return fmt.Errorf( | ||||
| 			"collected metric %s %s has help %q but should have %q", | ||||
| 			metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, | ||||
| 		) | ||||
| 	} | ||||
|  | ||||
| 	// Is the desc consistent with the content of the metric? | ||||
| 	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) | ||||
| 	copy(lpsFromDesc, desc.constLabelPairs) | ||||
| 	for _, l := range desc.variableLabels { | ||||
| 		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ | ||||
| 			Name: proto.String(l), | ||||
| 		}) | ||||
| 	} | ||||
| 	if len(lpsFromDesc) != len(dtoMetric.Label) { | ||||
| 		return fmt.Errorf( | ||||
| 			"labels in collected metric %s %s are inconsistent with descriptor %s", | ||||
| 			metricFamily.GetName(), dtoMetric, desc, | ||||
| 		) | ||||
| 	} | ||||
| 	sort.Sort(labelPairSorter(lpsFromDesc)) | ||||
| 	for i, lpFromDesc := range lpsFromDesc { | ||||
| 		lpFromMetric := dtoMetric.Label[i] | ||||
| 		if lpFromDesc.GetName() != lpFromMetric.GetName() || | ||||
| 			lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { | ||||
| 			return fmt.Errorf( | ||||
| 				"labels in collected metric %s %s are inconsistent with descriptor %s", | ||||
| 				metricFamily.GetName(), dtoMetric, desc, | ||||
| 			) | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
737	vendor/github.com/prometheus/client_golang/prometheus/summary.go (generated, vendored)
| @ -1,737 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"runtime" | ||||
| 	"sort" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/beorn7/perks/quantile" | ||||
| 	//lint:ignore SA1019 Need to keep deprecated package for compatibility. | ||||
| 	"github.com/golang/protobuf/proto" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // quantileLabel is used for the label that defines the quantile in a | ||||
| // summary. | ||||
| const quantileLabel = "quantile" | ||||
|  | ||||
| // A Summary captures individual observations from an event or sample stream and | ||||
| // summarizes them in a manner similar to traditional summary statistics: 1. sum | ||||
| // of observations, 2. observation count, 3. rank estimations. | ||||
| // | ||||
| // A typical use-case is the observation of request latencies. By default, a | ||||
| // Summary provides the median, the 90th and the 99th percentile of the latency | ||||
| // as rank estimations. However, the default behavior will change in the | ||||
| // upcoming v1.0.0 of the library. There will be no rank estimations at all by | ||||
| // default. For a sane transition, it is recommended to set the desired rank | ||||
| // estimations explicitly. | ||||
| // | ||||
| // Note that the rank estimations cannot be aggregated in a meaningful way with | ||||
| // the Prometheus query language (i.e. you cannot average or add them). If you | ||||
| // need aggregatable quantiles (e.g. you want the 99th percentile latency of all | ||||
| // queries served across all instances of a service), consider the Histogram | ||||
| // metric type. See the Prometheus documentation for more details. | ||||
| // | ||||
| // To create Summary instances, use NewSummary. | ||||
| type Summary interface { | ||||
| 	Metric | ||||
| 	Collector | ||||
|  | ||||
| 	// Observe adds a single observation to the summary. | ||||
| 	Observe(float64) | ||||
| } | ||||
|  | ||||
| var errQuantileLabelNotAllowed = fmt.Errorf( | ||||
| 	"%q is not allowed as label name in summaries", quantileLabel, | ||||
| ) | ||||
|  | ||||
| // Default values for SummaryOpts. | ||||
| const ( | ||||
| 	// DefMaxAge is the default duration for which observations stay | ||||
| 	// relevant. | ||||
| 	DefMaxAge time.Duration = 10 * time.Minute | ||||
| 	// DefAgeBuckets is the default number of buckets used to calculate the | ||||
| 	// age of observations. | ||||
| 	DefAgeBuckets = 5 | ||||
| 	// DefBufCap is the standard buffer size for collecting Summary observations. | ||||
| 	DefBufCap = 500 | ||||
| ) | ||||
|  | ||||
| // SummaryOpts bundles the options for creating a Summary metric. It is | ||||
| // mandatory to set Name to a non-empty string. While all other fields are | ||||
| // optional and can safely be left at their zero value, it is recommended to set | ||||
| // a help string and to explicitly set the Objectives field to the desired value | ||||
| // as the default value will change in the upcoming v1.0.0 of the library. | ||||
| type SummaryOpts struct { | ||||
| 	// Namespace, Subsystem, and Name are components of the fully-qualified | ||||
| 	// name of the Summary (created by joining these components with | ||||
| 	// "_"). Only Name is mandatory, the others merely help structuring the | ||||
| 	// name. Note that the fully-qualified name of the Summary must be a | ||||
| 	// valid Prometheus metric name. | ||||
| 	Namespace string | ||||
| 	Subsystem string | ||||
| 	Name      string | ||||
|  | ||||
| 	// Help provides information about this Summary. | ||||
| 	// | ||||
| 	// Metrics with the same fully-qualified name must have the same Help | ||||
| 	// string. | ||||
| 	Help string | ||||
|  | ||||
| 	// ConstLabels are used to attach fixed labels to this metric. Metrics | ||||
| 	// with the same fully-qualified name must have the same label names in | ||||
| 	// their ConstLabels. | ||||
| 	// | ||||
| 	// Due to the way a Summary is represented in the Prometheus text format | ||||
| 	// and how it is handled by the Prometheus server internally, “quantile” | ||||
| 	// is an illegal label name. Construction of a Summary or SummaryVec | ||||
| 	// will panic if this label name is used in ConstLabels. | ||||
| 	// | ||||
| 	// ConstLabels are only used rarely. In particular, do not use them to | ||||
| 	// attach the same labels to all your metrics. Those use cases are | ||||
| 	// better covered by target labels set by the scraping Prometheus | ||||
| 	// server, or by one specific metric (e.g. a build_info or a | ||||
| 	// machine_role metric). See also | ||||
| 	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels | ||||
| 	ConstLabels Labels | ||||
|  | ||||
| 	// Objectives defines the quantile rank estimates with their respective | ||||
| 	// absolute error. If Objectives[q] = e, then the value reported for q | ||||
| 	// will be the φ-quantile value for some φ between q-e and q+e.  The | ||||
| 	// default value is an empty map, resulting in a summary without | ||||
| 	// quantiles. | ||||
| 	Objectives map[float64]float64 | ||||
|  | ||||
| 	// MaxAge defines the duration for which an observation stays relevant | ||||
| 	// for the summary. Must be positive. The default value is DefMaxAge. | ||||
| 	MaxAge time.Duration | ||||
|  | ||||
| 	// AgeBuckets is the number of buckets used to exclude observations that | ||||
| 	// are older than MaxAge from the summary. A higher number has a | ||||
| 	// resource penalty, so only increase it if the higher resolution is | ||||
| 	// really required. For very high observation rates, you might want to | ||||
| 	// reduce the number of age buckets. With only one age bucket, you will | ||||
| 	// effectively see a complete reset of the summary each time MaxAge has | ||||
| 	// passed. The default value is DefAgeBuckets. | ||||
| 	AgeBuckets uint32 | ||||
|  | ||||
| 	// BufCap defines the default sample stream buffer size.  The default | ||||
| 	// value of DefBufCap should suffice for most uses. If there is a need | ||||
| 	// to increase the value, a multiple of 500 is recommended (because that | ||||
| 	// is the internal buffer size of the underlying package | ||||
| 	// "github.com/bmizerany/perks/quantile"). | ||||
| 	BufCap uint32 | ||||
| } | ||||
|  | ||||
| // Problem with the sliding-window decay algorithm... The Merge method of | ||||
| // perks/quantile is actually not working as advertised - and it might be | ||||
| // unfixable, as the underlying algorithm is apparently not capable of merging | ||||
| // summaries in the first place. To avoid using Merge, we are currently adding | ||||
| // observations to _each_ age bucket, i.e. the effort to add a sample is | ||||
| // essentially multiplied by the number of age buckets. When rotating age | ||||
| // buckets, we empty the previous head stream. On scrape time, we simply take | ||||
| // the quantiles from the head stream (no merging required). Result: More effort | ||||
| // on observation time, less effort on scrape time, which is exactly the | ||||
| // opposite of what we try to accomplish, but at least the results are correct. | ||||
| // | ||||
| // The quite elegant previous contraption to merge the age buckets efficiently | ||||
| // on scrape time (see the code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) | ||||
| // can't be used anymore. | ||||
|  | ||||
| // NewSummary creates a new Summary based on the provided SummaryOpts. | ||||
| func NewSummary(opts SummaryOpts) Summary { | ||||
| 	return newSummary( | ||||
| 		NewDesc( | ||||
| 			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 			opts.Help, | ||||
| 			nil, | ||||
| 			opts.ConstLabels, | ||||
| 		), | ||||
| 		opts, | ||||
| 	) | ||||
| } | ||||
|  | ||||
| func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { | ||||
| 	if len(desc.variableLabels) != len(labelValues) { | ||||
| 		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) | ||||
| 	} | ||||
|  | ||||
| 	for _, n := range desc.variableLabels { | ||||
| 		if n == quantileLabel { | ||||
| 			panic(errQuantileLabelNotAllowed) | ||||
| 		} | ||||
| 	} | ||||
| 	for _, lp := range desc.constLabelPairs { | ||||
| 		if lp.GetName() == quantileLabel { | ||||
| 			panic(errQuantileLabelNotAllowed) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if opts.Objectives == nil { | ||||
| 		opts.Objectives = map[float64]float64{} | ||||
| 	} | ||||
|  | ||||
| 	if opts.MaxAge < 0 { | ||||
| 		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) | ||||
| 	} | ||||
| 	if opts.MaxAge == 0 { | ||||
| 		opts.MaxAge = DefMaxAge | ||||
| 	} | ||||
|  | ||||
| 	if opts.AgeBuckets == 0 { | ||||
| 		opts.AgeBuckets = DefAgeBuckets | ||||
| 	} | ||||
|  | ||||
| 	if opts.BufCap == 0 { | ||||
| 		opts.BufCap = DefBufCap | ||||
| 	} | ||||
|  | ||||
| 	if len(opts.Objectives) == 0 { | ||||
| 		// Use the lock-free implementation of a Summary without objectives. | ||||
| 		s := &noObjectivesSummary{ | ||||
| 			desc:       desc, | ||||
| 			labelPairs: MakeLabelPairs(desc, labelValues), | ||||
| 			counts:     [2]*summaryCounts{{}, {}}, | ||||
| 		} | ||||
| 		s.init(s) // Init self-collection. | ||||
| 		return s | ||||
| 	} | ||||
|  | ||||
| 	s := &summary{ | ||||
| 		desc: desc, | ||||
|  | ||||
| 		objectives:       opts.Objectives, | ||||
| 		sortedObjectives: make([]float64, 0, len(opts.Objectives)), | ||||
|  | ||||
| 		labelPairs: MakeLabelPairs(desc, labelValues), | ||||
|  | ||||
| 		hotBuf:         make([]float64, 0, opts.BufCap), | ||||
| 		coldBuf:        make([]float64, 0, opts.BufCap), | ||||
| 		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), | ||||
| 	} | ||||
| 	s.headStreamExpTime = time.Now().Add(s.streamDuration) | ||||
| 	s.hotBufExpTime = s.headStreamExpTime | ||||
|  | ||||
| 	for i := uint32(0); i < opts.AgeBuckets; i++ { | ||||
| 		s.streams = append(s.streams, s.newStream()) | ||||
| 	} | ||||
| 	s.headStream = s.streams[0] | ||||
|  | ||||
| 	for qu := range s.objectives { | ||||
| 		s.sortedObjectives = append(s.sortedObjectives, qu) | ||||
| 	} | ||||
| 	sort.Float64s(s.sortedObjectives) | ||||
|  | ||||
| 	s.init(s) // Init self-collection. | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| type summary struct { | ||||
| 	selfCollector | ||||
|  | ||||
| 	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. | ||||
| 	mtx    sync.Mutex // Protects every other moving part. | ||||
| 	// Lock bufMtx before mtx if both are needed. | ||||
|  | ||||
| 	desc *Desc | ||||
|  | ||||
| 	objectives       map[float64]float64 | ||||
| 	sortedObjectives []float64 | ||||
|  | ||||
| 	labelPairs []*dto.LabelPair | ||||
|  | ||||
| 	sum float64 | ||||
| 	cnt uint64 | ||||
|  | ||||
| 	hotBuf, coldBuf []float64 | ||||
|  | ||||
| 	streams                          []*quantile.Stream | ||||
| 	streamDuration                   time.Duration | ||||
| 	headStream                       *quantile.Stream | ||||
| 	headStreamIdx                    int | ||||
| 	headStreamExpTime, hotBufExpTime time.Time | ||||
| } | ||||
|  | ||||
| func (s *summary) Desc() *Desc { | ||||
| 	return s.desc | ||||
| } | ||||
|  | ||||
| func (s *summary) Observe(v float64) { | ||||
| 	s.bufMtx.Lock() | ||||
| 	defer s.bufMtx.Unlock() | ||||
|  | ||||
| 	now := time.Now() | ||||
| 	if now.After(s.hotBufExpTime) { | ||||
| 		s.asyncFlush(now) | ||||
| 	} | ||||
| 	s.hotBuf = append(s.hotBuf, v) | ||||
| 	if len(s.hotBuf) == cap(s.hotBuf) { | ||||
| 		s.asyncFlush(now) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *summary) Write(out *dto.Metric) error { | ||||
| 	sum := &dto.Summary{} | ||||
| 	qs := make([]*dto.Quantile, 0, len(s.objectives)) | ||||
|  | ||||
| 	s.bufMtx.Lock() | ||||
| 	s.mtx.Lock() | ||||
| 	// Swap bufs even if hotBuf is empty to set new hotBufExpTime. | ||||
| 	s.swapBufs(time.Now()) | ||||
| 	s.bufMtx.Unlock() | ||||
|  | ||||
| 	s.flushColdBuf() | ||||
| 	sum.SampleCount = proto.Uint64(s.cnt) | ||||
| 	sum.SampleSum = proto.Float64(s.sum) | ||||
|  | ||||
| 	for _, rank := range s.sortedObjectives { | ||||
| 		var q float64 | ||||
| 		if s.headStream.Count() == 0 { | ||||
| 			q = math.NaN() | ||||
| 		} else { | ||||
| 			q = s.headStream.Query(rank) | ||||
| 		} | ||||
| 		qs = append(qs, &dto.Quantile{ | ||||
| 			Quantile: proto.Float64(rank), | ||||
| 			Value:    proto.Float64(q), | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	s.mtx.Unlock() | ||||
|  | ||||
| 	if len(qs) > 0 { | ||||
| 		sort.Sort(quantSort(qs)) | ||||
| 	} | ||||
| 	sum.Quantile = qs | ||||
|  | ||||
| 	out.Summary = sum | ||||
| 	out.Label = s.labelPairs | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (s *summary) newStream() *quantile.Stream { | ||||
| 	return quantile.NewTargeted(s.objectives) | ||||
| } | ||||
|  | ||||
| // asyncFlush needs bufMtx locked. | ||||
| func (s *summary) asyncFlush(now time.Time) { | ||||
| 	s.mtx.Lock() | ||||
| 	s.swapBufs(now) | ||||
|  | ||||
| 	// Unblock the original goroutine that was responsible for the mutation | ||||
| 	// that triggered the compaction.  But hold onto the global non-buffer | ||||
| 	// state mutex until the operation finishes. | ||||
| 	go func() { | ||||
| 		s.flushColdBuf() | ||||
| 		s.mtx.Unlock() | ||||
| 	}() | ||||
| } | ||||
|  | ||||
| // maybeRotateStreams needs mtx AND bufMtx locked. | ||||
| func (s *summary) maybeRotateStreams() { | ||||
| 	for !s.hotBufExpTime.Equal(s.headStreamExpTime) { | ||||
| 		s.headStream.Reset() | ||||
| 		s.headStreamIdx++ | ||||
| 		if s.headStreamIdx >= len(s.streams) { | ||||
| 			s.headStreamIdx = 0 | ||||
| 		} | ||||
| 		s.headStream = s.streams[s.headStreamIdx] | ||||
| 		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // flushColdBuf needs mtx locked. | ||||
| func (s *summary) flushColdBuf() { | ||||
| 	for _, v := range s.coldBuf { | ||||
| 		for _, stream := range s.streams { | ||||
| 			stream.Insert(v) | ||||
| 		} | ||||
| 		s.cnt++ | ||||
| 		s.sum += v | ||||
| 	} | ||||
| 	s.coldBuf = s.coldBuf[0:0] | ||||
| 	s.maybeRotateStreams() | ||||
| } | ||||
|  | ||||
| // swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. | ||||
| func (s *summary) swapBufs(now time.Time) { | ||||
| 	if len(s.coldBuf) != 0 { | ||||
| 		panic("coldBuf is not empty") | ||||
| 	} | ||||
| 	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf | ||||
| 	// hotBuf is now empty and gets new expiration set. | ||||
| 	for now.After(s.hotBufExpTime) { | ||||
| 		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type summaryCounts struct { | ||||
| 	// sumBits contains the bits of the float64 representing the sum of all | ||||
| 	// observations. sumBits and count have to go first in the struct to | ||||
| 	// guarantee alignment for atomic operations. | ||||
| 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG | ||||
| 	sumBits uint64 | ||||
| 	count   uint64 | ||||
| } | ||||
|  | ||||
| type noObjectivesSummary struct { | ||||
| 	// countAndHotIdx enables lock-free writes with use of atomic updates. | ||||
| 	// The most significant bit is the hot index [0 or 1] of the counts field | ||||
| 	// below. Observe calls update the hot one. All remaining bits count the | ||||
| 	// number of Observe calls. Observe starts by incrementing this counter, | ||||
| 	// and finishes by incrementing the count field in the respective | ||||
| 	// summaryCounts, as a marker for completion. | ||||
| 	// | ||||
| 	// Calls of the Write method (which are non-mutating reads from the | ||||
| 	// perspective of the summary) swap the hot and cold counts under the | ||||
| 	// writeMtx lock. A cooldown is awaited (while locked) by comparing the | ||||
| 	// number of observations with the initiation count. Once they match, the | ||||
| 	// last observation on the now-cold counts has completed. All cold fields | ||||
| 	// must be merged into the new hot counts before writeMtx is released. | ||||
|  | ||||
| 	// Fields with atomic access first! See alignment constraint: | ||||
| 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG | ||||
| 	countAndHotIdx uint64 | ||||
|  | ||||
| 	selfCollector | ||||
| 	desc     *Desc | ||||
| 	writeMtx sync.Mutex // Only used in the Write method. | ||||
|  | ||||
| 	// Two counts, one is "hot" for lock-free observations, the other is | ||||
| 	// "cold" for writing out a dto.Metric. It has to be an array of | ||||
| 	// pointers to guarantee 64bit alignment of the summaryCounts, see | ||||
| 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG. | ||||
| 	counts [2]*summaryCounts | ||||
|  | ||||
| 	labelPairs []*dto.LabelPair | ||||
| } | ||||
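As a standalone illustration of the packed countAndHotIdx word described above (a sketch, not part of the library), the hot index and the number of started observations can be recovered like this:

	package main

	import "fmt"

	// decodeCountAndHotIdx mirrors the bit layout documented above: the top bit
	// selects counts[0] or counts[1], the lower 63 bits count started Observe calls.
	func decodeCountAndHotIdx(n uint64) (hotIdx int, started uint64) {
		return int(n >> 63), n & ((1 << 63) - 1)
	}

	func main() {
		n := uint64(1)<<63 | 42 // hot index 1, 42 observations started
		idx, count := decodeCountAndHotIdx(n)
		fmt.Println(idx, count) // prints: 1 42
	}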
|  | ||||
| func (s *noObjectivesSummary) Desc() *Desc { | ||||
| 	return s.desc | ||||
| } | ||||
|  | ||||
| func (s *noObjectivesSummary) Observe(v float64) { | ||||
| 	// We increment s.countAndHotIdx so that the counter in the lower | ||||
| 	// 63 bits gets incremented. At the same time, we get the new value | ||||
| 	// back, which we can use to find the currently-hot counts. | ||||
| 	n := atomic.AddUint64(&s.countAndHotIdx, 1) | ||||
| 	hotCounts := s.counts[n>>63] | ||||
|  | ||||
| 	for { | ||||
| 		oldBits := atomic.LoadUint64(&hotCounts.sumBits) | ||||
| 		newBits := math.Float64bits(math.Float64frombits(oldBits) + v) | ||||
| 		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	// Increment count last as we take it as a signal that the observation | ||||
| 	// is complete. | ||||
| 	atomic.AddUint64(&hotCounts.count, 1) | ||||
| } | ||||
|  | ||||
| func (s *noObjectivesSummary) Write(out *dto.Metric) error { | ||||
| 	// For simplicity, we protect this whole method by a mutex. It is not in | ||||
| 	// the hot path, i.e. Observe is called much more often than Write. The | ||||
| 	// complication of making Write lock-free isn't worth it, if possible at | ||||
| 	// all. | ||||
| 	s.writeMtx.Lock() | ||||
| 	defer s.writeMtx.Unlock() | ||||
|  | ||||
| 	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) | ||||
| 	// without touching the count bits. See the struct comments for a full | ||||
| 	// description of the algorithm. | ||||
| 	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) | ||||
| 	// count is contained unchanged in the lower 63 bits. | ||||
| 	count := n & ((1 << 63) - 1) | ||||
| 	// The most significant bit tells us which counts is hot. The complement | ||||
| 	// is thus the cold one. | ||||
| 	hotCounts := s.counts[n>>63] | ||||
| 	coldCounts := s.counts[(^n)>>63] | ||||
|  | ||||
| 	// Await cooldown. | ||||
| 	for count != atomic.LoadUint64(&coldCounts.count) { | ||||
| 		runtime.Gosched() // Let observations get work done. | ||||
| 	} | ||||
|  | ||||
| 	sum := &dto.Summary{ | ||||
| 		SampleCount: proto.Uint64(count), | ||||
| 		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), | ||||
| 	} | ||||
|  | ||||
| 	out.Summary = sum | ||||
| 	out.Label = s.labelPairs | ||||
|  | ||||
| 	// Finally add all the cold counts to the new hot counts and reset the cold counts. | ||||
| 	atomic.AddUint64(&hotCounts.count, count) | ||||
| 	atomic.StoreUint64(&coldCounts.count, 0) | ||||
| 	for { | ||||
| 		oldBits := atomic.LoadUint64(&hotCounts.sumBits) | ||||
| 		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) | ||||
| 		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { | ||||
| 			atomic.StoreUint64(&coldCounts.sumBits, 0) | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type quantSort []*dto.Quantile | ||||
|  | ||||
| func (s quantSort) Len() int { | ||||
| 	return len(s) | ||||
| } | ||||
|  | ||||
| func (s quantSort) Swap(i, j int) { | ||||
| 	s[i], s[j] = s[j], s[i] | ||||
| } | ||||
|  | ||||
| func (s quantSort) Less(i, j int) bool { | ||||
| 	return s[i].GetQuantile() < s[j].GetQuantile() | ||||
| } | ||||
|  | ||||
| // SummaryVec is a Collector that bundles a set of Summaries that all share the | ||||
| // same Desc, but have different values for their variable labels. This is used | ||||
| // if you want to observe the same thing partitioned by various dimensions | ||||
| // (e.g. HTTP request latencies, partitioned by status code and method). Create | ||||
| // instances with NewSummaryVec. | ||||
| type SummaryVec struct { | ||||
| 	*MetricVec | ||||
| } | ||||
|  | ||||
| // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and | ||||
| // partitioned by the given label names. | ||||
| // | ||||
| // Due to the way a Summary is represented in the Prometheus text format and how | ||||
| // it is handled by the Prometheus server internally, “quantile” is an illegal | ||||
| // label name. NewSummaryVec will panic if this label name is used. | ||||
| func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { | ||||
| 	for _, ln := range labelNames { | ||||
| 		if ln == quantileLabel { | ||||
| 			panic(errQuantileLabelNotAllowed) | ||||
| 		} | ||||
| 	} | ||||
| 	desc := NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		labelNames, | ||||
| 		opts.ConstLabels, | ||||
| 	) | ||||
| 	return &SummaryVec{ | ||||
| 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { | ||||
| 			return newSummary(desc, opts, lvs...) | ||||
| 		}), | ||||
| 	} | ||||
| } | ||||
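A usage sketch for SummaryVec (the metric name, label names, and values are hypothetical):

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		// Latency summary partitioned by HTTP method and status code.
		latency := prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Name:       "http_request_duration_seconds",
				Help:       "Request latencies in seconds.",
				Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
			},
			[]string{"method", "code"},
		)
		prometheus.MustRegister(latency)

		// Each distinct label-value combination gets its own child Summary.
		latency.WithLabelValues("GET", "200").Observe(0.021)
		latency.With(prometheus.Labels{"method": "POST", "code": "500"}).Observe(0.153)
	}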
|  | ||||
| // GetMetricWithLabelValues returns the Summary for the given slice of label | ||||
| // values (same order as the variable labels in Desc). If that combination of | ||||
| // label values is accessed for the first time, a new Summary is created. | ||||
| // | ||||
| // It is possible to call this method without using the returned Summary to only | ||||
| // create the new Summary but leave it at its starting value, a Summary without | ||||
| // any observations. | ||||
| // | ||||
| // Keeping the Summary for later use is possible (and should be considered if | ||||
| // performance is critical), but keep in mind that Reset, DeleteLabelValues and | ||||
| // Delete can be used to delete the Summary from the SummaryVec. In that case, | ||||
| // the Summary will still exist, but it will not be exported anymore, even if a | ||||
| // Summary with the same label values is created later. See also the CounterVec | ||||
| // example. | ||||
| // | ||||
| // An error is returned if the number of label values is not the same as the | ||||
| // number of variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // Note that for more than one label value, this method is prone to mistakes | ||||
| // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | ||||
| // an alternative to avoid that type of mistake. For higher label numbers, the | ||||
| // latter has a much more readable (albeit more verbose) syntax, but it comes | ||||
| // with a performance overhead (for creating and processing the Labels map). | ||||
| // See also the GaugeVec example. | ||||
| func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Observer), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // GetMetricWith returns the Summary for the given Labels map (the label names | ||||
| // must match those of the variable labels in Desc). If that label map is | ||||
| // accessed for the first time, a new Summary is created. Implications of | ||||
| // creating a Summary without using it and keeping the Summary for later use are | ||||
| // the same as for GetMetricWithLabelValues. | ||||
| // | ||||
| // An error is returned if the number and names of the Labels are inconsistent | ||||
| // with those of the variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // This method is used for the same purpose as | ||||
| // GetMetricWithLabelValues(...string). See there for pros and cons of the two | ||||
| // methods. | ||||
| func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { | ||||
| 	metric, err := v.MetricVec.GetMetricWith(labels) | ||||
| 	if metric != nil { | ||||
| 		return metric.(Observer), err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // WithLabelValues works as GetMetricWithLabelValues, but panics where | ||||
| // GetMetricWithLabelValues would have returned an error. Not returning an | ||||
| // error allows shortcuts like | ||||
| //     myVec.WithLabelValues("404", "GET").Observe(42.21) | ||||
| func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { | ||||
| 	s, err := v.GetMetricWithLabelValues(lvs...) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // With works as GetMetricWith, but panics where GetMetricWith would have | ||||
| // returned an error. Not returning an error allows shortcuts like | ||||
| //     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) | ||||
| func (v *SummaryVec) With(labels Labels) Observer { | ||||
| 	s, err := v.GetMetricWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // CurryWith returns a vector curried with the provided labels, i.e. the | ||||
| // returned vector has those labels pre-set for all labeled operations performed | ||||
| // on it. The cardinality of the curried vector is reduced accordingly. The | ||||
| // order of the remaining labels stays the same (just with the curried labels | ||||
| // taken out of the sequence – which is relevant for the | ||||
| // (GetMetric)WithLabelValues methods). It is possible to curry a curried | ||||
| // vector, but only with labels not yet used for currying before. | ||||
| // | ||||
| // The metrics contained in the SummaryVec are shared between the curried and | ||||
| // uncurried vectors. They are just accessed differently. Curried and uncurried | ||||
| // vectors behave identically in terms of collection. Only one must be | ||||
| // registered with a given registry (usually the uncurried version). The Reset | ||||
| // method deletes all metrics, even if called on a curried vector. | ||||
| func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { | ||||
| 	vec, err := v.MetricVec.CurryWith(labels) | ||||
| 	if vec != nil { | ||||
| 		return &SummaryVec{vec}, err | ||||
| 	} | ||||
| 	return nil, err | ||||
| } | ||||
|  | ||||
| // MustCurryWith works as CurryWith but panics where CurryWith would have | ||||
| // returned an error. | ||||
| func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { | ||||
| 	vec, err := v.CurryWith(labels) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return vec | ||||
| } | ||||
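A currying sketch (the "handler" and "method" labels and their values are hypothetical): MustCurryWith pre-binds one label, and the curried and uncurried vectors share the same underlying metrics:

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		latency := prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Name: "handler_request_duration_seconds",
				Help: "Request latencies in seconds, by handler and method.",
			},
			[]string{"handler", "method"},
		)
		prometheus.MustRegister(latency)

		// Pre-set "handler"; the curried vector only needs the remaining "method" label.
		apiLatency := latency.MustCurryWith(prometheus.Labels{"handler": "/api"})
		apiLatency.WithLabelValues("GET").Observe(0.007)

		// The same child metric is reachable through the uncurried vector.
		latency.WithLabelValues("/api", "GET").Observe(0.009)
	}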
|  | ||||
| type constSummary struct { | ||||
| 	desc       *Desc | ||||
| 	count      uint64 | ||||
| 	sum        float64 | ||||
| 	quantiles  map[float64]float64 | ||||
| 	labelPairs []*dto.LabelPair | ||||
| } | ||||
|  | ||||
| func (s *constSummary) Desc() *Desc { | ||||
| 	return s.desc | ||||
| } | ||||
|  | ||||
| func (s *constSummary) Write(out *dto.Metric) error { | ||||
| 	sum := &dto.Summary{} | ||||
| 	qs := make([]*dto.Quantile, 0, len(s.quantiles)) | ||||
|  | ||||
| 	sum.SampleCount = proto.Uint64(s.count) | ||||
| 	sum.SampleSum = proto.Float64(s.sum) | ||||
|  | ||||
| 	for rank, q := range s.quantiles { | ||||
| 		qs = append(qs, &dto.Quantile{ | ||||
| 			Quantile: proto.Float64(rank), | ||||
| 			Value:    proto.Float64(q), | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	if len(qs) > 0 { | ||||
| 		sort.Sort(quantSort(qs)) | ||||
| 	} | ||||
| 	sum.Quantile = qs | ||||
|  | ||||
| 	out.Summary = sum | ||||
| 	out.Label = s.labelPairs | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // NewConstSummary returns a metric representing a Prometheus summary with fixed | ||||
| // values for the count, sum, and quantiles. As those parameters cannot be | ||||
| // changed, the returned value does not implement the Summary interface (but | ||||
| // only the Metric interface). Users of this package will not have much use for | ||||
| // it in regular operations. However, when implementing custom Collectors, it is | ||||
| // useful as a throw-away metric that is generated on the fly to send it to | ||||
| // Prometheus in the Collect method. | ||||
| // | ||||
| // quantiles maps ranks to quantile values. For example, a median latency of | ||||
| // 0.23s and a 99th percentile latency of 0.56s would be expressed as: | ||||
| //     map[float64]float64{0.5: 0.23, 0.99: 0.56} | ||||
| // | ||||
| // NewConstSummary returns an error if the length of labelValues is not | ||||
| // consistent with the variable labels in Desc or if Desc is invalid. | ||||
| func NewConstSummary( | ||||
| 	desc *Desc, | ||||
| 	count uint64, | ||||
| 	sum float64, | ||||
| 	quantiles map[float64]float64, | ||||
| 	labelValues ...string, | ||||
| ) (Metric, error) { | ||||
| 	if desc.err != nil { | ||||
| 		return nil, desc.err | ||||
| 	} | ||||
| 	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &constSummary{ | ||||
| 		desc:       desc, | ||||
| 		count:      count, | ||||
| 		sum:        sum, | ||||
| 		quantiles:  quantiles, | ||||
| 		labelPairs: MakeLabelPairs(desc, labelValues), | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // MustNewConstSummary is a version of NewConstSummary that panics where | ||||
| // NewConstSummary would have returned an error. | ||||
| func MustNewConstSummary( | ||||
| 	desc *Desc, | ||||
| 	count uint64, | ||||
| 	sum float64, | ||||
| 	quantiles map[float64]float64, | ||||
| 	labelValues ...string, | ||||
| ) Metric { | ||||
| 	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return m | ||||
| } | ||||
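A sketch of MustNewConstSummary inside a custom Collector, as suggested by the doc comment above (the collector type, metric name, and values are invented for illustration):

	package main

	import "github.com/prometheus/client_golang/prometheus"

	// externalStatsCollector is a hypothetical custom Collector that re-exposes
	// summary data obtained from some external system on every scrape.
	type externalStatsCollector struct {
		desc *prometheus.Desc
	}

	func (c *externalStatsCollector) Describe(ch chan<- *prometheus.Desc) {
		ch <- c.desc
	}

	func (c *externalStatsCollector) Collect(ch chan<- prometheus.Metric) {
		// In a real Collector these values would be fetched at scrape time.
		count, sum := uint64(1027), 73.42
		quantiles := map[float64]float64{0.5: 0.012, 0.99: 0.23}
		ch <- prometheus.MustNewConstSummary(c.desc, count, sum, quantiles, "worker-1")
	}

	func main() {
		c := &externalStatsCollector{
			desc: prometheus.NewDesc(
				"external_task_duration_seconds",
				"Task durations reported by an external system.",
				[]string{"instance_name"}, nil,
			),
		}
		prometheus.MustRegister(c)
	}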
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/timer.go (generated, vendored, 54 lines)
							| @ -1,54 +0,0 @@ | ||||
| // Copyright 2016 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import "time" | ||||
|  | ||||
| // Timer is a helper type to time functions. Use NewTimer to create new | ||||
| // instances. | ||||
| type Timer struct { | ||||
| 	begin    time.Time | ||||
| 	observer Observer | ||||
| } | ||||
|  | ||||
| // NewTimer creates a new Timer. The provided Observer is used to observe a | ||||
| // duration in seconds. Timer is usually used to time a function call in the | ||||
| // following way: | ||||
| //    func TimeMe() { | ||||
| //        timer := NewTimer(myHistogram) | ||||
| //        defer timer.ObserveDuration() | ||||
| //        // Do actual work. | ||||
| //    } | ||||
| func NewTimer(o Observer) *Timer { | ||||
| 	return &Timer{ | ||||
| 		begin:    time.Now(), | ||||
| 		observer: o, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // ObserveDuration records the duration passed since the Timer was created with | ||||
| // NewTimer. It calls the Observe method of the Observer provided during | ||||
| // construction with the duration in seconds as an argument. The observed | ||||
| // duration is also returned. ObserveDuration is usually called with a defer | ||||
| // statement. | ||||
| // | ||||
| // Note that this method is only guaranteed to never observe negative durations | ||||
| // if used with Go 1.9 or later. | ||||
| func (t *Timer) ObserveDuration() time.Duration { | ||||
| 	d := time.Since(t.begin) | ||||
| 	if t.observer != nil { | ||||
| 		t.observer.Observe(d.Seconds()) | ||||
| 	} | ||||
| 	return d | ||||
| } | ||||
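A usage sketch of the deferred-timer pattern from the doc comment above (the metric and function names are illustrative):

	package main

	import "github.com/prometheus/client_golang/prometheus"

	var requestDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Time spent handling a request.",
	})

	// handleRequest times its own execution and records it on the Summary.
	func handleRequest() {
		timer := prometheus.NewTimer(requestDuration)
		defer timer.ObserveDuration()

		// ... actual work ...
	}

	func main() {
		prometheus.MustRegister(requestDuration)
		handleRequest()
	}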
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/untyped.go (generated, vendored, 42 lines)
							| @ -1,42 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| // UntypedOpts is an alias for Opts. See there for doc comments. | ||||
| type UntypedOpts Opts | ||||
|  | ||||
| // UntypedFunc works like GaugeFunc but the collected metric is of type | ||||
| // "Untyped". UntypedFunc is useful to mirror an external metric of unknown | ||||
| // type. | ||||
| // | ||||
| // To create UntypedFunc instances, use NewUntypedFunc. | ||||
| type UntypedFunc interface { | ||||
| 	Metric | ||||
| 	Collector | ||||
| } | ||||
|  | ||||
| // NewUntypedFunc creates a new UntypedFunc based on the provided | ||||
| // UntypedOpts. The value reported is determined by calling the given function | ||||
| // from within the Write method. Take into account that metric collection may | ||||
| // happen concurrently. If that results in concurrent calls to Write, like in | ||||
| // the case where an UntypedFunc is directly registered with Prometheus, the | ||||
| // provided function must be concurrency-safe. | ||||
| func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { | ||||
| 	return newValueFunc(NewDesc( | ||||
| 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | ||||
| 		opts.Help, | ||||
| 		nil, | ||||
| 		opts.ConstLabels, | ||||
| 	), UntypedValue, function) | ||||
| } | ||||
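A usage sketch for NewUntypedFunc; the metric name and the readLegacyQueueDepth helper are hypothetical stand-ins for an external value source:

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		// Mirror a value of unknown type from some legacy system. The function
		// is called at collection time and must be concurrency-safe.
		mirrored := prometheus.NewUntypedFunc(
			prometheus.UntypedOpts{
				Name: "legacy_queue_depth",
				Help: "Queue depth as reported by a legacy system (type unknown).",
			},
			func() float64 { return readLegacyQueueDepth() },
		)
		prometheus.MustRegister(mirrored)
	}

	// readLegacyQueueDepth stands in for whatever call fetches the external value.
	func readLegacyQueueDepth() float64 { return 17 }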
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/value.go (generated, vendored, 212 lines)
							| @ -1,212 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| 	"time" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	//lint:ignore SA1019 Need to keep deprecated package for compatibility. | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/golang/protobuf/ptypes" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // ValueType is an enumeration of metric types that represent a simple value. | ||||
| type ValueType int | ||||
|  | ||||
| // Possible values for the ValueType enum. Use UntypedValue to mark a metric | ||||
| // with an unknown type. | ||||
| const ( | ||||
| 	_ ValueType = iota | ||||
| 	CounterValue | ||||
| 	GaugeValue | ||||
| 	UntypedValue | ||||
| ) | ||||
|  | ||||
| // valueFunc is a generic metric for simple values retrieved on collect time | ||||
| // from a function. It implements Metric and Collector. Its effective type is | ||||
| // determined by ValueType. This is a low-level building block used by the | ||||
| // library to back the implementations of CounterFunc, GaugeFunc, and | ||||
| // UntypedFunc. | ||||
| type valueFunc struct { | ||||
| 	selfCollector | ||||
|  | ||||
| 	desc       *Desc | ||||
| 	valType    ValueType | ||||
| 	function   func() float64 | ||||
| 	labelPairs []*dto.LabelPair | ||||
| } | ||||
|  | ||||
| // newValueFunc returns a newly allocated valueFunc with the given Desc and | ||||
| // ValueType. The value reported is determined by calling the given function | ||||
| // from within the Write method. Take into account that metric collection may | ||||
| // happen concurrently. If that results in concurrent calls to Write, like in | ||||
| // the case where a valueFunc is directly registered with Prometheus, the | ||||
| // provided function must be concurrency-safe. | ||||
| func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { | ||||
| 	result := &valueFunc{ | ||||
| 		desc:       desc, | ||||
| 		valType:    valueType, | ||||
| 		function:   function, | ||||
| 		labelPairs: MakeLabelPairs(desc, nil), | ||||
| 	} | ||||
| 	result.init(result) | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func (v *valueFunc) Desc() *Desc { | ||||
| 	return v.desc | ||||
| } | ||||
|  | ||||
| func (v *valueFunc) Write(out *dto.Metric) error { | ||||
| 	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) | ||||
| } | ||||
|  | ||||
| // NewConstMetric returns a metric with one fixed value that cannot be | ||||
| // changed. Users of this package will not have much use for it in regular | ||||
| // operations. However, when implementing custom Collectors, it is useful as a | ||||
| // throw-away metric that is generated on the fly to send it to Prometheus in | ||||
| // the Collect method. NewConstMetric returns an error if the length of | ||||
| // labelValues is not consistent with the variable labels in Desc or if Desc is | ||||
| // invalid. | ||||
| func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { | ||||
| 	if desc.err != nil { | ||||
| 		return nil, desc.err | ||||
| 	} | ||||
| 	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &constMetric{ | ||||
| 		desc:       desc, | ||||
| 		valType:    valueType, | ||||
| 		val:        value, | ||||
| 		labelPairs: MakeLabelPairs(desc, labelValues), | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // MustNewConstMetric is a version of NewConstMetric that panics where | ||||
| // NewConstMetric would have returned an error. | ||||
| func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { | ||||
| 	m, err := NewConstMetric(desc, valueType, value, labelValues...) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return m | ||||
| } | ||||
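A sketch of MustNewConstMetric driven from a custom Collector's Collect method (the collector type, metric name, and values are invented for illustration):

	package main

	import "github.com/prometheus/client_golang/prometheus"

	// diskUsageCollector is a hypothetical Collector that creates throw-away
	// const metrics at scrape time, one per filesystem.
	type diskUsageCollector struct {
		desc *prometheus.Desc
	}

	func (c *diskUsageCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

	func (c *diskUsageCollector) Collect(ch chan<- prometheus.Metric) {
		// Values would normally come from a syscall or an external API.
		for fs, used := range map[string]float64{"/": 0.63, "/data": 0.41} {
			ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, used, fs)
		}
	}

	func main() {
		prometheus.MustRegister(&diskUsageCollector{
			desc: prometheus.NewDesc(
				"filesystem_used_ratio",
				"Fraction of the filesystem in use.",
				[]string{"mountpoint"}, nil,
			),
		})
	}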
|  | ||||
| type constMetric struct { | ||||
| 	desc       *Desc | ||||
| 	valType    ValueType | ||||
| 	val        float64 | ||||
| 	labelPairs []*dto.LabelPair | ||||
| } | ||||
|  | ||||
| func (m *constMetric) Desc() *Desc { | ||||
| 	return m.desc | ||||
| } | ||||
|  | ||||
| func (m *constMetric) Write(out *dto.Metric) error { | ||||
| 	return populateMetric(m.valType, m.val, m.labelPairs, nil, out) | ||||
| } | ||||
|  | ||||
| func populateMetric( | ||||
| 	t ValueType, | ||||
| 	v float64, | ||||
| 	labelPairs []*dto.LabelPair, | ||||
| 	e *dto.Exemplar, | ||||
| 	m *dto.Metric, | ||||
| ) error { | ||||
| 	m.Label = labelPairs | ||||
| 	switch t { | ||||
| 	case CounterValue: | ||||
| 		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} | ||||
| 	case GaugeValue: | ||||
| 		m.Gauge = &dto.Gauge{Value: proto.Float64(v)} | ||||
| 	case UntypedValue: | ||||
| 		m.Untyped = &dto.Untyped{Value: proto.Float64(v)} | ||||
| 	default: | ||||
| 		return fmt.Errorf("encountered unknown type %v", t) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // MakeLabelPairs is a helper function to create protobuf LabelPairs from the | ||||
| // variable and constant labels in the provided Desc. The values for the | ||||
| // variable labels are defined by the labelValues slice, which must be in the | ||||
| // same order as the corresponding variable labels in the Desc. | ||||
| // | ||||
| // This function is only needed for custom Metric implementations. See MetricVec | ||||
| // example. | ||||
| func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { | ||||
| 	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) | ||||
| 	if totalLen == 0 { | ||||
| 		// Super fast path. | ||||
| 		return nil | ||||
| 	} | ||||
| 	if len(desc.variableLabels) == 0 { | ||||
| 		// Moderately fast path. | ||||
| 		return desc.constLabelPairs | ||||
| 	} | ||||
| 	labelPairs := make([]*dto.LabelPair, 0, totalLen) | ||||
| 	for i, n := range desc.variableLabels { | ||||
| 		labelPairs = append(labelPairs, &dto.LabelPair{ | ||||
| 			Name:  proto.String(n), | ||||
| 			Value: proto.String(labelValues[i]), | ||||
| 		}) | ||||
| 	} | ||||
| 	labelPairs = append(labelPairs, desc.constLabelPairs...) | ||||
| 	sort.Sort(labelPairSorter(labelPairs)) | ||||
| 	return labelPairs | ||||
| } | ||||
|  | ||||
| // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. | ||||
| const ExemplarMaxRunes = 64 | ||||
|  | ||||
| // newExemplar creates a new dto.Exemplar from the provided values. An error is | ||||
| // returned if any of the label names or values are invalid or if the total | ||||
| // number of runes in the label names and values exceeds ExemplarMaxRunes. | ||||
| func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { | ||||
| 	e := &dto.Exemplar{} | ||||
| 	e.Value = proto.Float64(value) | ||||
| 	tsProto, err := ptypes.TimestampProto(ts) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	e.Timestamp = tsProto | ||||
| 	labelPairs := make([]*dto.LabelPair, 0, len(l)) | ||||
| 	var runes int | ||||
| 	for name, value := range l { | ||||
| 		if !checkLabelName(name) { | ||||
| 			return nil, fmt.Errorf("exemplar label name %q is invalid", name) | ||||
| 		} | ||||
| 		runes += utf8.RuneCountInString(name) | ||||
| 		if !utf8.ValidString(value) { | ||||
| 			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) | ||||
| 		} | ||||
| 		runes += utf8.RuneCountInString(value) | ||||
| 		labelPairs = append(labelPairs, &dto.LabelPair{ | ||||
| 			Name:  proto.String(name), | ||||
| 			Value: proto.String(value), | ||||
| 		}) | ||||
| 	} | ||||
| 	if runes > ExemplarMaxRunes { | ||||
| 		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) | ||||
| 	} | ||||
| 	e.Label = labelPairs | ||||
| 	return e, nil | ||||
| } | ||||
							
								
								
									
vendor/github.com/prometheus/client_golang/prometheus/vec.go (generated, vendored, 556 lines)
							| @ -1,556 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/prometheus/common/model" | ||||
| ) | ||||
|  | ||||
| // MetricVec is a Collector to bundle metrics of the same name that differ in | ||||
| // their label values. MetricVec is not used directly but as a building block | ||||
| // for implementations of vectors of a given metric type, like GaugeVec, | ||||
| // CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be | ||||
| // used for custom Metric implementations. | ||||
| // | ||||
| // To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in | ||||
| // FooVec and initialize it with NewMetricVec. Implement wrappers for | ||||
| // GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather | ||||
| // than (Metric, error). Similarly, create a wrapper for CurryWith that returns | ||||
| // (*FooVec, error) rather than (*MetricVec, error). It is recommended to also | ||||
| // add the convenience methods WithLabelValues, With, and MustCurryWith, which | ||||
| // panic instead of returning errors. See also the MetricVec example. | ||||
| type MetricVec struct { | ||||
| 	*metricMap | ||||
|  | ||||
| 	curry []curriedLabelValue | ||||
|  | ||||
| 	// hashAdd and hashAddByte can be replaced for testing collision handling. | ||||
| 	hashAdd     func(h uint64, s string) uint64 | ||||
| 	hashAddByte func(h uint64, b byte) uint64 | ||||
| } | ||||
|  | ||||
| // NewMetricVec returns an initialized MetricVec. | ||||
| func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { | ||||
| 	return &MetricVec{ | ||||
| 		metricMap: &metricMap{ | ||||
| 			metrics:   map[uint64][]metricWithLabelValues{}, | ||||
| 			desc:      desc, | ||||
| 			newMetric: newMetric, | ||||
| 		}, | ||||
| 		hashAdd:     hashAdd, | ||||
| 		hashAddByte: hashAddByte, | ||||
| 	} | ||||
| } | ||||
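A sketch of the custom-vector pattern described in the MetricVec doc comment above, also showing the intended use of MakeLabelPairs; the infoVec/info names are illustrative:

	package main

	import (
		"github.com/golang/protobuf/proto"
		dto "github.com/prometheus/client_model/go"

		"github.com/prometheus/client_golang/prometheus"
	)

	// info is a minimal custom Metric that always reports the value 1 and exists
	// only to carry its label values (an "info"-style metric).
	type info struct {
		desc       *prometheus.Desc
		labelPairs []*dto.LabelPair
	}

	func (i *info) Desc() *prometheus.Desc { return i.desc }

	func (i *info) Write(out *dto.Metric) error {
		out.Label = i.labelPairs
		out.Gauge = &dto.Gauge{Value: proto.Float64(1)}
		return nil
	}

	// infoVec embeds *prometheus.MetricVec as recommended in the doc comment above.
	type infoVec struct {
		*prometheus.MetricVec
	}

	func NewInfoVec(name, help string, labelNames []string) *infoVec {
		desc := prometheus.NewDesc(name, help, labelNames, nil)
		return &infoVec{
			MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
				// Called once per new label-value combination.
				return &info{desc: desc, labelPairs: prometheus.MakeLabelPairs(desc, lvs)}
			}),
		}
	}

	// WithLabelValues is the convenience wrapper recommended above; it panics on
	// an inconsistent label count, mirroring the built-in vector types.
	func (v *infoVec) WithLabelValues(lvs ...string) prometheus.Metric {
		m, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
		if err != nil {
			panic(err)
		}
		return m
	}

	func main() {
		buildInfo := NewInfoVec("build_info", "Build information.", []string{"version", "revision"})
		prometheus.MustRegister(buildInfo)
		buildInfo.WithLabelValues("1.2.3", "abc123")
	}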
|  | ||||
| // DeleteLabelValues removes the metric where the variable labels are the same | ||||
| // as those passed in as labels (same order as the VariableLabels in Desc). It | ||||
| // returns true if a metric was deleted. | ||||
| // | ||||
| // It is not an error if the number of label values is not the same as the | ||||
| // number of VariableLabels in Desc. However, such inconsistent label count can | ||||
| // never match an actual metric, so the method will always return false in that | ||||
| // case. | ||||
| // | ||||
| // Note that for more than one label value, this method is prone to mistakes | ||||
| // caused by an incorrect order of arguments. Consider Delete(Labels) as an | ||||
| // alternative to avoid that type of mistake. For higher label numbers, the | ||||
| // latter has a much more readable (albeit more verbose) syntax, but it comes | ||||
| // with a performance overhead (for creating and processing the Labels map). | ||||
| // See also the CounterVec example. | ||||
| func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { | ||||
| 	h, err := m.hashLabelValues(lvs) | ||||
| 	if err != nil { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) | ||||
| } | ||||
|  | ||||
| // Delete deletes the metric where the variable labels are the same as those | ||||
| // passed in as labels. It returns true if a metric was deleted. | ||||
| // | ||||
| // It is not an error if the number and names of the Labels are inconsistent | ||||
| // with those of the VariableLabels in Desc. However, such inconsistent Labels | ||||
| // can never match an actual metric, so the method will always return false in | ||||
| // that case. | ||||
| // | ||||
| // This method is used for the same purpose as DeleteLabelValues(...string). See | ||||
| // there for pros and cons of the two methods. | ||||
| func (m *MetricVec) Delete(labels Labels) bool { | ||||
| 	h, err := m.hashLabels(labels) | ||||
| 	if err != nil { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) | ||||
| } | ||||
|  | ||||
| // Without explicit forwarding of Describe, Collect, Reset, those methods won't | ||||
| // show up in GoDoc. | ||||
|  | ||||
| // Describe implements Collector. | ||||
| func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } | ||||
|  | ||||
| // Collect implements Collector. | ||||
| func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } | ||||
|  | ||||
| // Reset deletes all metrics in this vector. | ||||
| func (m *MetricVec) Reset() { m.metricMap.Reset() } | ||||
|  | ||||
| // CurryWith returns a vector curried with the provided labels, i.e. the | ||||
| // returned vector has those labels pre-set for all labeled operations performed | ||||
| // on it. The cardinality of the curried vector is reduced accordingly. The | ||||
| // order of the remaining labels stays the same (just with the curried labels | ||||
| // taken out of the sequence – which is relevant for the | ||||
| // (GetMetric)WithLabelValues methods). It is possible to curry a curried | ||||
| // vector, but only with labels not yet used for currying before. | ||||
| // | ||||
| // The metrics contained in the MetricVec are shared between the curried and | ||||
| // uncurried vectors. They are just accessed differently. Curried and uncurried | ||||
| // vectors behave identically in terms of collection. Only one must be | ||||
| // registered with a given registry (usually the uncurried version). The Reset | ||||
| // method deletes all metrics, even if called on a curried vector. | ||||
| // | ||||
| // Note that CurryWith is usually not called directly but through a wrapper | ||||
| // around MetricVec, implementing a vector for a specific Metric | ||||
| // implementation, for example GaugeVec. | ||||
| func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { | ||||
| 	var ( | ||||
| 		newCurry []curriedLabelValue | ||||
| 		oldCurry = m.curry | ||||
| 		iCurry   int | ||||
| 	) | ||||
| 	for i, label := range m.desc.variableLabels { | ||||
| 		val, ok := labels[label] | ||||
| 		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { | ||||
| 			if ok { | ||||
| 				return nil, fmt.Errorf("label name %q is already curried", label) | ||||
| 			} | ||||
| 			newCurry = append(newCurry, oldCurry[iCurry]) | ||||
| 			iCurry++ | ||||
| 		} else { | ||||
| 			if !ok { | ||||
| 				continue // Label stays uncurried. | ||||
| 			} | ||||
| 			newCurry = append(newCurry, curriedLabelValue{i, val}) | ||||
| 		} | ||||
| 	} | ||||
| 	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { | ||||
| 		return nil, fmt.Errorf("%d unknown label(s) found during currying", l) | ||||
| 	} | ||||
|  | ||||
| 	return &MetricVec{ | ||||
| 		metricMap:   m.metricMap, | ||||
| 		curry:       newCurry, | ||||
| 		hashAdd:     m.hashAdd, | ||||
| 		hashAddByte: m.hashAddByte, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // GetMetricWithLabelValues returns the Metric for the given slice of label | ||||
| // values (same order as the variable labels in Desc). If that combination of | ||||
| // label values is accessed for the first time, a new Metric is created (by | ||||
| // calling the newMetric function provided during construction of the | ||||
| // MetricVec). | ||||
| // | ||||
| // It is possible to call this method without using the returned Metric to only | ||||
| // create the new Metric but leave it in its initial state. | ||||
| // | ||||
| // Keeping the Metric for later use is possible (and should be considered if | ||||
| // performance is critical), but keep in mind that Reset, DeleteLabelValues and | ||||
| // Delete can be used to delete the Metric from the MetricVec. In that case, the | ||||
| // Metric will still exist, but it will not be exported anymore, even if a | ||||
| // Metric with the same label values is created later. | ||||
| // | ||||
| // An error is returned if the number of label values is not the same as the | ||||
| // number of variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // Note that for more than one label value, this method is prone to mistakes | ||||
| // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | ||||
| // an alternative to avoid that type of mistake. For higher label numbers, the | ||||
| // latter has a much more readable (albeit more verbose) syntax, but it comes | ||||
| // with a performance overhead (for creating and processing the Labels map). | ||||
| // | ||||
| // Note that GetMetricWithLabelValues is usually not called directly but through | ||||
| // a wrapper around MetricVec, implementing a vector for a specific Metric | ||||
| // implementation, for example GaugeVec. | ||||
| func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { | ||||
| 	h, err := m.hashLabelValues(lvs) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil | ||||
| } | ||||
|  | ||||
| // GetMetricWith returns the Metric for the given Labels map (the label names | ||||
| // must match those of the variable labels in Desc). If that label map is | ||||
| // accessed for the first time, a new Metric is created. Implications of | ||||
| // creating a Metric without using it and keeping the Metric for later use | ||||
| // are the same as for GetMetricWithLabelValues. | ||||
| // | ||||
| // An error is returned if the number and names of the Labels are inconsistent | ||||
| // with those of the variable labels in Desc (minus any curried labels). | ||||
| // | ||||
| // This method is used for the same purpose as | ||||
| // GetMetricWithLabelValues(...string). See there for pros and cons of the two | ||||
| // methods. | ||||
| // | ||||
| // Note that GetMetricWith is usually not called directly but through a wrapper | ||||
| // around MetricVec, implementing a vector for a specific Metric implementation, | ||||
| // for example GaugeVec. | ||||
| func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { | ||||
| 	h, err := m.hashLabels(labels) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil | ||||
| } | ||||
|  | ||||
| func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { | ||||
| 	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	var ( | ||||
| 		h             = hashNew() | ||||
| 		curry         = m.curry | ||||
| 		iVals, iCurry int | ||||
| 	) | ||||
| 	for i := 0; i < len(m.desc.variableLabels); i++ { | ||||
| 		if iCurry < len(curry) && curry[iCurry].index == i { | ||||
| 			h = m.hashAdd(h, curry[iCurry].value) | ||||
| 			iCurry++ | ||||
| 		} else { | ||||
| 			h = m.hashAdd(h, vals[iVals]) | ||||
| 			iVals++ | ||||
| 		} | ||||
| 		h = m.hashAddByte(h, model.SeparatorByte) | ||||
| 	} | ||||
| 	return h, nil | ||||
| } | ||||
|  | ||||
| func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { | ||||
| 	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	var ( | ||||
| 		h      = hashNew() | ||||
| 		curry  = m.curry | ||||
| 		iCurry int | ||||
| 	) | ||||
| 	for i, label := range m.desc.variableLabels { | ||||
| 		val, ok := labels[label] | ||||
| 		if iCurry < len(curry) && curry[iCurry].index == i { | ||||
| 			if ok { | ||||
| 				return 0, fmt.Errorf("label name %q is already curried", label) | ||||
| 			} | ||||
| 			h = m.hashAdd(h, curry[iCurry].value) | ||||
| 			iCurry++ | ||||
| 		} else { | ||||
| 			if !ok { | ||||
| 				return 0, fmt.Errorf("label name %q missing in label map", label) | ||||
| 			} | ||||
| 			h = m.hashAdd(h, val) | ||||
| 		} | ||||
| 		h = m.hashAddByte(h, model.SeparatorByte) | ||||
| 	} | ||||
| 	return h, nil | ||||
| } | ||||
|  | ||||
| // metricWithLabelValues provides the metric and its label values for | ||||
| // disambiguation on hash collision. | ||||
| type metricWithLabelValues struct { | ||||
| 	values []string | ||||
| 	metric Metric | ||||
| } | ||||
|  | ||||
| // curriedLabelValue sets the curried value for a label at the given index. | ||||
| type curriedLabelValue struct { | ||||
| 	index int | ||||
| 	value string | ||||
| } | ||||
|  | ||||
| // metricMap is a helper for MetricVec and shared between differently curried | ||||
| // MetricVecs. | ||||
| type metricMap struct { | ||||
| 	mtx       sync.RWMutex // Protects metrics. | ||||
| 	metrics   map[uint64][]metricWithLabelValues | ||||
| 	desc      *Desc | ||||
| 	newMetric func(labelValues ...string) Metric | ||||
| } | ||||
|  | ||||
| // Describe implements Collector. It will send exactly one Desc to the provided | ||||
| // channel. | ||||
| func (m *metricMap) Describe(ch chan<- *Desc) { | ||||
| 	ch <- m.desc | ||||
| } | ||||
|  | ||||
| // Collect implements Collector. | ||||
| func (m *metricMap) Collect(ch chan<- Metric) { | ||||
| 	m.mtx.RLock() | ||||
| 	defer m.mtx.RUnlock() | ||||
|  | ||||
| 	for _, metrics := range m.metrics { | ||||
| 		for _, metric := range metrics { | ||||
| 			ch <- metric.metric | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Reset deletes all metrics in this vector. | ||||
| func (m *metricMap) Reset() { | ||||
| 	m.mtx.Lock() | ||||
| 	defer m.mtx.Unlock() | ||||
|  | ||||
| 	for h := range m.metrics { | ||||
| 		delete(m.metrics, h) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // deleteByHashWithLabelValues removes the metric from the hash bucket h. If | ||||
| // there are multiple matches in the bucket, use lvs to select a metric and | ||||
| // remove only that metric. | ||||
| func (m *metricMap) deleteByHashWithLabelValues( | ||||
| 	h uint64, lvs []string, curry []curriedLabelValue, | ||||
| ) bool { | ||||
| 	m.mtx.Lock() | ||||
| 	defer m.mtx.Unlock() | ||||
|  | ||||
| 	metrics, ok := m.metrics[h] | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	i := findMetricWithLabelValues(metrics, lvs, curry) | ||||
| 	if i >= len(metrics) { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	if len(metrics) > 1 { | ||||
| 		old := metrics | ||||
| 		m.metrics[h] = append(metrics[:i], metrics[i+1:]...) | ||||
| 		old[len(old)-1] = metricWithLabelValues{} | ||||
| 	} else { | ||||
| 		delete(m.metrics, h) | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // deleteByHashWithLabels removes the metric from the hash bucket h. If there | ||||
| // are multiple matches in the bucket, use the labels to select a metric and | ||||
| // remove only that metric. | ||||
| func (m *metricMap) deleteByHashWithLabels( | ||||
| 	h uint64, labels Labels, curry []curriedLabelValue, | ||||
| ) bool { | ||||
| 	m.mtx.Lock() | ||||
| 	defer m.mtx.Unlock() | ||||
|  | ||||
| 	metrics, ok := m.metrics[h] | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	i := findMetricWithLabels(m.desc, metrics, labels, curry) | ||||
| 	if i >= len(metrics) { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	if len(metrics) > 1 { | ||||
| 		old := metrics | ||||
| 		m.metrics[h] = append(metrics[:i], metrics[i+1:]...) | ||||
| 		old[len(old)-1] = metricWithLabelValues{} | ||||
| 	} else { | ||||
| 		delete(m.metrics, h) | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value | ||||
| // or creates it and returns the new one. | ||||
| // | ||||
| // This function holds the mutex. | ||||
| func (m *metricMap) getOrCreateMetricWithLabelValues( | ||||
| 	hash uint64, lvs []string, curry []curriedLabelValue, | ||||
| ) Metric { | ||||
| 	m.mtx.RLock() | ||||
| 	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) | ||||
| 	m.mtx.RUnlock() | ||||
| 	if ok { | ||||
| 		return metric | ||||
| 	} | ||||
|  | ||||
| 	m.mtx.Lock() | ||||
| 	defer m.mtx.Unlock() | ||||
| 	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) | ||||
| 	if !ok { | ||||
| 		inlinedLVs := inlineLabelValues(lvs, curry) | ||||
| 		metric = m.newMetric(inlinedLVs...) | ||||
| 		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) | ||||
| 	} | ||||
| 	return metric | ||||
| } | ||||
|  | ||||
| // getOrCreateMetricWithLabels retrieves the metric by hash and labels | ||||
| // or creates it and returns the new one. | ||||
| // | ||||
| // This function holds the mutex. | ||||
| func (m *metricMap) getOrCreateMetricWithLabels( | ||||
| 	hash uint64, labels Labels, curry []curriedLabelValue, | ||||
| ) Metric { | ||||
| 	m.mtx.RLock() | ||||
| 	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) | ||||
| 	m.mtx.RUnlock() | ||||
| 	if ok { | ||||
| 		return metric | ||||
| 	} | ||||
|  | ||||
| 	m.mtx.Lock() | ||||
| 	defer m.mtx.Unlock() | ||||
| 	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) | ||||
| 	if !ok { | ||||
| 		lvs := extractLabelValues(m.desc, labels, curry) | ||||
| 		metric = m.newMetric(lvs...) | ||||
| 		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) | ||||
| 	} | ||||
| 	return metric | ||||
| } | ||||
|  | ||||
| // getMetricWithHashAndLabelValues gets a metric while handling possible | ||||
| // collisions in the hash space. Must be called while holding the read mutex. | ||||
| func (m *metricMap) getMetricWithHashAndLabelValues( | ||||
| 	h uint64, lvs []string, curry []curriedLabelValue, | ||||
| ) (Metric, bool) { | ||||
| 	metrics, ok := m.metrics[h] | ||||
| 	if ok { | ||||
| 		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { | ||||
| 			return metrics[i].metric, true | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, false | ||||
| } | ||||
|  | ||||
| // getMetricWithHashAndLabels gets a metric while handling possible collisions | ||||
| // in the hash space. Must be called while holding the read mutex. | ||||
| func (m *metricMap) getMetricWithHashAndLabels( | ||||
| 	h uint64, labels Labels, curry []curriedLabelValue, | ||||
| ) (Metric, bool) { | ||||
| 	metrics, ok := m.metrics[h] | ||||
| 	if ok { | ||||
| 		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { | ||||
| 			return metrics[i].metric, true | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, false | ||||
| } | ||||
|  | ||||
| // findMetricWithLabelValues returns the index of the matching metric or | ||||
| // len(metrics) if not found. | ||||
| func findMetricWithLabelValues( | ||||
| 	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, | ||||
| ) int { | ||||
| 	for i, metric := range metrics { | ||||
| 		if matchLabelValues(metric.values, lvs, curry) { | ||||
| 			return i | ||||
| 		} | ||||
| 	} | ||||
| 	return len(metrics) | ||||
| } | ||||
|  | ||||
| // findMetricWithLabels returns the index of the matching metric or len(metrics) | ||||
| // if not found. | ||||
| func findMetricWithLabels( | ||||
| 	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, | ||||
| ) int { | ||||
| 	for i, metric := range metrics { | ||||
| 		if matchLabels(desc, metric.values, labels, curry) { | ||||
| 			return i | ||||
| 		} | ||||
| 	} | ||||
| 	return len(metrics) | ||||
| } | ||||
|  | ||||
| func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { | ||||
| 	if len(values) != len(lvs)+len(curry) { | ||||
| 		return false | ||||
| 	} | ||||
| 	var iLVs, iCurry int | ||||
| 	for i, v := range values { | ||||
| 		if iCurry < len(curry) && curry[iCurry].index == i { | ||||
| 			if v != curry[iCurry].value { | ||||
| 				return false | ||||
| 			} | ||||
| 			iCurry++ | ||||
| 			continue | ||||
| 		} | ||||
| 		if v != lvs[iLVs] { | ||||
| 			return false | ||||
| 		} | ||||
| 		iLVs++ | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { | ||||
| 	if len(values) != len(labels)+len(curry) { | ||||
| 		return false | ||||
| 	} | ||||
| 	iCurry := 0 | ||||
| 	for i, k := range desc.variableLabels { | ||||
| 		if iCurry < len(curry) && curry[iCurry].index == i { | ||||
| 			if values[i] != curry[iCurry].value { | ||||
| 				return false | ||||
| 			} | ||||
| 			iCurry++ | ||||
| 			continue | ||||
| 		} | ||||
| 		if values[i] != labels[k] { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { | ||||
| 	labelValues := make([]string, len(labels)+len(curry)) | ||||
| 	iCurry := 0 | ||||
| 	for i, k := range desc.variableLabels { | ||||
| 		if iCurry < len(curry) && curry[iCurry].index == i { | ||||
| 			labelValues[i] = curry[iCurry].value | ||||
| 			iCurry++ | ||||
| 			continue | ||||
| 		} | ||||
| 		labelValues[i] = labels[k] | ||||
| 	} | ||||
| 	return labelValues | ||||
| } | ||||
|  | ||||
| func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { | ||||
| 	labelValues := make([]string, len(lvs)+len(curry)) | ||||
| 	var iCurry, iLVs int | ||||
| 	for i := range labelValues { | ||||
| 		if iCurry < len(curry) && curry[iCurry].index == i { | ||||
| 			labelValues[i] = curry[iCurry].value | ||||
| 			iCurry++ | ||||
| 			continue | ||||
| 		} | ||||
| 		labelValues[i] = lvs[iLVs] | ||||
| 		iLVs++ | ||||
| 	} | ||||
| 	return labelValues | ||||
| } | ||||
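|  | ||||
| // A minimal sketch of how the helpers above interleave curried and directly | ||||
| // supplied label values; the label names and values are illustrative only. | ||||
| // Assume variable labels ("code", "method", "path") with "method" curried at | ||||
| // index 1, i.e. curry = []curriedLabelValue{{index: 1, value: "POST"}}: | ||||
| // | ||||
| //	inlineLabelValues([]string{"404", "/x"}, curry)                               // ["404", "POST", "/x"] | ||||
| //	matchLabelValues([]string{"404", "POST", "/x"}, []string{"404", "/x"}, curry) // true | ||||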
							
								
								
									
214	vendor/github.com/prometheus/client_golang/prometheus/wrap.go	(generated, vendored)
							| @ -1,214 +0,0 @@ | ||||
| // Copyright 2018 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package prometheus | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
|  | ||||
| 	//lint:ignore SA1019 Need to keep deprecated package for compatibility. | ||||
| 	"github.com/golang/protobuf/proto" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // WrapRegistererWith returns a Registerer wrapping the provided | ||||
| // Registerer. Collectors registered with the returned Registerer will be | ||||
| // registered with the wrapped Registerer in a modified way. The modified | ||||
| // Collector adds the provided Labels to all Metrics it collects (as | ||||
| // ConstLabels). The Metrics collected by the unmodified Collector must not | ||||
| // duplicate any of those labels. Wrapping a nil value is valid, resulting | ||||
| // in a no-op Registerer. | ||||
| // | ||||
| // WrapRegistererWith provides a way to add fixed labels to a subset of | ||||
| // Collectors. It should not be used to add fixed labels to all metrics | ||||
| // exposed. See also | ||||
| // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels | ||||
| // | ||||
| // Conflicts between Collectors registered through the original Registerer with | ||||
| // Collectors registered through the wrapping Registerer will still be | ||||
| // detected. Any AlreadyRegisteredError returned by the Register method of | ||||
| // either Registerer will contain the ExistingCollector in the form it was | ||||
| // provided to the respective registry. | ||||
| // | ||||
| // The Collector example demonstrates a use of WrapRegistererWith. | ||||
| func WrapRegistererWith(labels Labels, reg Registerer) Registerer { | ||||
| 	return &wrappingRegisterer{ | ||||
| 		wrappedRegisterer: reg, | ||||
| 		labels:            labels, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WrapRegistererWithPrefix returns a Registerer wrapping the provided | ||||
| // Registerer. Collectors registered with the returned Registerer will be | ||||
| // registered with the wrapped Registerer in a modified way. The modified | ||||
| // Collector adds the provided prefix to the name of all Metrics it collects. | ||||
| // Wrapping a nil value is valid, resulting in a no-op Registerer. | ||||
| // | ||||
| // WrapRegistererWithPrefix is useful to have one place to prefix all metrics of | ||||
| // a sub-system. To make this work, register metrics of the sub-system with the | ||||
| // wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful | ||||
| // to use the same prefix for all metrics exposed. In particular, do not prefix | ||||
| // metric names that are standardized across applications, as that would break | ||||
| // horizontal monitoring, for example the metrics provided by the Go collector | ||||
| // (see NewGoCollector) and the process collector (see NewProcessCollector). (In | ||||
| // fact, those metrics are already prefixed with “go_” or “process_”, | ||||
| // respectively.) | ||||
| // | ||||
| // Conflicts between Collectors registered through the original Registerer with | ||||
| // Collectors registered through the wrapping Registerer will still be | ||||
| // detected. Any AlreadyRegisteredError returned by the Register method of | ||||
| // either Registerer will contain the ExistingCollector in the form it was | ||||
| // provided to the respective registry. | ||||
| func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { | ||||
| 	return &wrappingRegisterer{ | ||||
| 		wrappedRegisterer: reg, | ||||
| 		prefix:            prefix, | ||||
| 	} | ||||
| } | ||||
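|  | ||||
| // A minimal usage sketch from a caller's point of view, assuming only the | ||||
| // exported client_golang API; myCollector is a hypothetical Collector: | ||||
| // | ||||
| //	wrapped := prometheus.WrapRegistererWith( | ||||
| //		prometheus.Labels{"component": "pinger"}, prometheus.DefaultRegisterer) | ||||
| //	wrapped.MustRegister(myCollector) // its metrics now carry component="pinger" | ||||
| // | ||||
| // WrapRegistererWithPrefix can be chained the same way to add a name prefix. | ||||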
|  | ||||
| type wrappingRegisterer struct { | ||||
| 	wrappedRegisterer Registerer | ||||
| 	prefix            string | ||||
| 	labels            Labels | ||||
| } | ||||
|  | ||||
| func (r *wrappingRegisterer) Register(c Collector) error { | ||||
| 	if r.wrappedRegisterer == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return r.wrappedRegisterer.Register(&wrappingCollector{ | ||||
| 		wrappedCollector: c, | ||||
| 		prefix:           r.prefix, | ||||
| 		labels:           r.labels, | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (r *wrappingRegisterer) MustRegister(cs ...Collector) { | ||||
| 	if r.wrappedRegisterer == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	for _, c := range cs { | ||||
| 		if err := r.Register(c); err != nil { | ||||
| 			panic(err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *wrappingRegisterer) Unregister(c Collector) bool { | ||||
| 	if r.wrappedRegisterer == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return r.wrappedRegisterer.Unregister(&wrappingCollector{ | ||||
| 		wrappedCollector: c, | ||||
| 		prefix:           r.prefix, | ||||
| 		labels:           r.labels, | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| type wrappingCollector struct { | ||||
| 	wrappedCollector Collector | ||||
| 	prefix           string | ||||
| 	labels           Labels | ||||
| } | ||||
|  | ||||
| func (c *wrappingCollector) Collect(ch chan<- Metric) { | ||||
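| 	// Drain the wrapped Collector in a goroutine and forward each metric it | ||||
| 	// emits, rewrapped so the prefix and labels are applied when the metric | ||||
| 	// is described or written. | ||||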
| 	wrappedCh := make(chan Metric) | ||||
| 	go func() { | ||||
| 		c.wrappedCollector.Collect(wrappedCh) | ||||
| 		close(wrappedCh) | ||||
| 	}() | ||||
| 	for m := range wrappedCh { | ||||
| 		ch <- &wrappingMetric{ | ||||
| 			wrappedMetric: m, | ||||
| 			prefix:        c.prefix, | ||||
| 			labels:        c.labels, | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *wrappingCollector) Describe(ch chan<- *Desc) { | ||||
| 	wrappedCh := make(chan *Desc) | ||||
| 	go func() { | ||||
| 		c.wrappedCollector.Describe(wrappedCh) | ||||
| 		close(wrappedCh) | ||||
| 	}() | ||||
| 	for desc := range wrappedCh { | ||||
| 		ch <- wrapDesc(desc, c.prefix, c.labels) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *wrappingCollector) unwrapRecursively() Collector { | ||||
| 	switch wc := c.wrappedCollector.(type) { | ||||
| 	case *wrappingCollector: | ||||
| 		return wc.unwrapRecursively() | ||||
| 	default: | ||||
| 		return wc | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type wrappingMetric struct { | ||||
| 	wrappedMetric Metric | ||||
| 	prefix        string | ||||
| 	labels        Labels | ||||
| } | ||||
|  | ||||
| func (m *wrappingMetric) Desc() *Desc { | ||||
| 	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) | ||||
| } | ||||
|  | ||||
| func (m *wrappingMetric) Write(out *dto.Metric) error { | ||||
| 	if err := m.wrappedMetric.Write(out); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if len(m.labels) == 0 { | ||||
| 		// No wrapping labels. | ||||
| 		return nil | ||||
| 	} | ||||
| 	for ln, lv := range m.labels { | ||||
| 		out.Label = append(out.Label, &dto.LabelPair{ | ||||
| 			Name:  proto.String(ln), | ||||
| 			Value: proto.String(lv), | ||||
| 		}) | ||||
| 	} | ||||
| 	sort.Sort(labelPairSorter(out.Label)) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { | ||||
| 	constLabels := Labels{} | ||||
| 	for _, lp := range desc.constLabelPairs { | ||||
| 		constLabels[*lp.Name] = *lp.Value | ||||
| 	} | ||||
| 	for ln, lv := range labels { | ||||
| 		if _, alreadyUsed := constLabels[ln]; alreadyUsed { | ||||
| 			return &Desc{ | ||||
| 				fqName:          desc.fqName, | ||||
| 				help:            desc.help, | ||||
| 				variableLabels:  desc.variableLabels, | ||||
| 				constLabelPairs: desc.constLabelPairs, | ||||
| 				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln), | ||||
| 			} | ||||
| 		} | ||||
| 		constLabels[ln] = lv | ||||
| 	} | ||||
| 	// NewDesc will do remaining validations. | ||||
| 	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) | ||||
| 	// Propagate errors if there were any. This will override any error | ||||
| 	// created by NewDesc above, i.e. earlier errors get precedence. | ||||
| 	if desc.err != nil { | ||||
| 		newDesc.err = desc.err | ||||
| 	} | ||||
| 	return newDesc | ||||
| } | ||||
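|  | ||||
| // For example (hypothetical label names): wrapping a Desc that already has | ||||
| // the const label component="api" with Labels{"component": "pinger"} does not | ||||
| // panic here; it returns a Desc whose err field records the conflict, which | ||||
| // the registry then reports when the wrapped Collector is registered. | ||||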
							
								
								
									
201	vendor/github.com/prometheus/client_model/LICENSE	(generated, vendored)
							| @ -1,201 +0,0 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
5	vendor/github.com/prometheus/client_model/NOTICE	(generated, vendored)
							| @ -1,5 +0,0 @@ | ||||
| Data model artifacts for Prometheus. | ||||
| Copyright 2012-2015 The Prometheus Authors | ||||
|  | ||||
| This product includes software developed at | ||||
| SoundCloud Ltd. (http://soundcloud.com/). | ||||
							
								
								
									
723	vendor/github.com/prometheus/client_model/go/metrics.pb.go	(generated, vendored)
							| @ -1,723 +0,0 @@ | ||||
| // Code generated by protoc-gen-go. DO NOT EDIT. | ||||
| // source: metrics.proto | ||||
|  | ||||
| package io_prometheus_client | ||||
|  | ||||
| import ( | ||||
| 	fmt "fmt" | ||||
| 	proto "github.com/golang/protobuf/proto" | ||||
| 	timestamp "github.com/golang/protobuf/ptypes/timestamp" | ||||
| 	math "math" | ||||
| ) | ||||
|  | ||||
| // Reference imports to suppress errors if they are not otherwise used. | ||||
| var _ = proto.Marshal | ||||
| var _ = fmt.Errorf | ||||
| var _ = math.Inf | ||||
|  | ||||
| // This is a compile-time assertion to ensure that this generated file | ||||
| // is compatible with the proto package it is being compiled against. | ||||
| // A compilation error at this line likely means your copy of the | ||||
| // proto package needs to be updated. | ||||
| const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package | ||||
|  | ||||
| type MetricType int32 | ||||
|  | ||||
| const ( | ||||
| 	MetricType_COUNTER   MetricType = 0 | ||||
| 	MetricType_GAUGE     MetricType = 1 | ||||
| 	MetricType_SUMMARY   MetricType = 2 | ||||
| 	MetricType_UNTYPED   MetricType = 3 | ||||
| 	MetricType_HISTOGRAM MetricType = 4 | ||||
| ) | ||||
|  | ||||
| var MetricType_name = map[int32]string{ | ||||
| 	0: "COUNTER", | ||||
| 	1: "GAUGE", | ||||
| 	2: "SUMMARY", | ||||
| 	3: "UNTYPED", | ||||
| 	4: "HISTOGRAM", | ||||
| } | ||||
|  | ||||
| var MetricType_value = map[string]int32{ | ||||
| 	"COUNTER":   0, | ||||
| 	"GAUGE":     1, | ||||
| 	"SUMMARY":   2, | ||||
| 	"UNTYPED":   3, | ||||
| 	"HISTOGRAM": 4, | ||||
| } | ||||
|  | ||||
| func (x MetricType) Enum() *MetricType { | ||||
| 	p := new(MetricType) | ||||
| 	*p = x | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| func (x MetricType) String() string { | ||||
| 	return proto.EnumName(MetricType_name, int32(x)) | ||||
| } | ||||
|  | ||||
| func (x *MetricType) UnmarshalJSON(data []byte) error { | ||||
| 	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	*x = MetricType(value) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (MetricType) EnumDescriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{0} | ||||
| } | ||||
|  | ||||
| type LabelPair struct { | ||||
| 	Name                 *string  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` | ||||
| 	Value                *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{} `json:"-"` | ||||
| 	XXX_unrecognized     []byte   `json:"-"` | ||||
| 	XXX_sizecache        int32    `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *LabelPair) Reset()         { *m = LabelPair{} } | ||||
| func (m *LabelPair) String() string { return proto.CompactTextString(m) } | ||||
| func (*LabelPair) ProtoMessage()    {} | ||||
| func (*LabelPair) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{0} | ||||
| } | ||||
|  | ||||
| func (m *LabelPair) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_LabelPair.Unmarshal(m, b) | ||||
| } | ||||
| func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *LabelPair) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_LabelPair.Merge(m, src) | ||||
| } | ||||
| func (m *LabelPair) XXX_Size() int { | ||||
| 	return xxx_messageInfo_LabelPair.Size(m) | ||||
| } | ||||
| func (m *LabelPair) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_LabelPair.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_LabelPair proto.InternalMessageInfo | ||||
|  | ||||
| func (m *LabelPair) GetName() string { | ||||
| 	if m != nil && m.Name != nil { | ||||
| 		return *m.Name | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *LabelPair) GetValue() string { | ||||
| 	if m != nil && m.Value != nil { | ||||
| 		return *m.Value | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| type Gauge struct { | ||||
| 	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{} `json:"-"` | ||||
| 	XXX_unrecognized     []byte   `json:"-"` | ||||
| 	XXX_sizecache        int32    `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Gauge) Reset()         { *m = Gauge{} } | ||||
| func (m *Gauge) String() string { return proto.CompactTextString(m) } | ||||
| func (*Gauge) ProtoMessage()    {} | ||||
| func (*Gauge) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{1} | ||||
| } | ||||
|  | ||||
| func (m *Gauge) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Gauge.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Gauge) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Gauge.Merge(m, src) | ||||
| } | ||||
| func (m *Gauge) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Gauge.Size(m) | ||||
| } | ||||
| func (m *Gauge) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Gauge.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Gauge proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Gauge) GetValue() float64 { | ||||
| 	if m != nil && m.Value != nil { | ||||
| 		return *m.Value | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| type Counter struct { | ||||
| 	Value                *float64  `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` | ||||
| 	Exemplar             *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{}  `json:"-"` | ||||
| 	XXX_unrecognized     []byte    `json:"-"` | ||||
| 	XXX_sizecache        int32     `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Counter) Reset()         { *m = Counter{} } | ||||
| func (m *Counter) String() string { return proto.CompactTextString(m) } | ||||
| func (*Counter) ProtoMessage()    {} | ||||
| func (*Counter) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{2} | ||||
| } | ||||
|  | ||||
| func (m *Counter) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Counter.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Counter.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Counter) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Counter.Merge(m, src) | ||||
| } | ||||
| func (m *Counter) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Counter.Size(m) | ||||
| } | ||||
| func (m *Counter) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Counter.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Counter proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Counter) GetValue() float64 { | ||||
| 	if m != nil && m.Value != nil { | ||||
| 		return *m.Value | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Counter) GetExemplar() *Exemplar { | ||||
| 	if m != nil { | ||||
| 		return m.Exemplar | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type Quantile struct { | ||||
| 	Quantile             *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` | ||||
| 	Value                *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{} `json:"-"` | ||||
| 	XXX_unrecognized     []byte   `json:"-"` | ||||
| 	XXX_sizecache        int32    `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Quantile) Reset()         { *m = Quantile{} } | ||||
| func (m *Quantile) String() string { return proto.CompactTextString(m) } | ||||
| func (*Quantile) ProtoMessage()    {} | ||||
| func (*Quantile) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{3} | ||||
| } | ||||
|  | ||||
| func (m *Quantile) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Quantile.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Quantile) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Quantile.Merge(m, src) | ||||
| } | ||||
| func (m *Quantile) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Quantile.Size(m) | ||||
| } | ||||
| func (m *Quantile) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Quantile.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Quantile proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Quantile) GetQuantile() float64 { | ||||
| 	if m != nil && m.Quantile != nil { | ||||
| 		return *m.Quantile | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Quantile) GetValue() float64 { | ||||
| 	if m != nil && m.Value != nil { | ||||
| 		return *m.Value | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| type Summary struct { | ||||
| 	SampleCount          *uint64     `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` | ||||
| 	SampleSum            *float64    `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` | ||||
| 	Quantile             []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{}    `json:"-"` | ||||
| 	XXX_unrecognized     []byte      `json:"-"` | ||||
| 	XXX_sizecache        int32       `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Summary) Reset()         { *m = Summary{} } | ||||
| func (m *Summary) String() string { return proto.CompactTextString(m) } | ||||
| func (*Summary) ProtoMessage()    {} | ||||
| func (*Summary) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{4} | ||||
| } | ||||
|  | ||||
| func (m *Summary) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Summary.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Summary.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Summary) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Summary.Merge(m, src) | ||||
| } | ||||
| func (m *Summary) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Summary.Size(m) | ||||
| } | ||||
| func (m *Summary) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Summary.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Summary proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Summary) GetSampleCount() uint64 { | ||||
| 	if m != nil && m.SampleCount != nil { | ||||
| 		return *m.SampleCount | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Summary) GetSampleSum() float64 { | ||||
| 	if m != nil && m.SampleSum != nil { | ||||
| 		return *m.SampleSum | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Summary) GetQuantile() []*Quantile { | ||||
| 	if m != nil { | ||||
| 		return m.Quantile | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type Untyped struct { | ||||
| 	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{} `json:"-"` | ||||
| 	XXX_unrecognized     []byte   `json:"-"` | ||||
| 	XXX_sizecache        int32    `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Untyped) Reset()         { *m = Untyped{} } | ||||
| func (m *Untyped) String() string { return proto.CompactTextString(m) } | ||||
| func (*Untyped) ProtoMessage()    {} | ||||
| func (*Untyped) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{5} | ||||
| } | ||||
|  | ||||
| func (m *Untyped) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Untyped.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Untyped) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Untyped.Merge(m, src) | ||||
| } | ||||
| func (m *Untyped) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Untyped.Size(m) | ||||
| } | ||||
| func (m *Untyped) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Untyped.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Untyped proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Untyped) GetValue() float64 { | ||||
| 	if m != nil && m.Value != nil { | ||||
| 		return *m.Value | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| type Histogram struct { | ||||
| 	SampleCount          *uint64   `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` | ||||
| 	SampleSum            *float64  `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` | ||||
| 	Bucket               []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{}  `json:"-"` | ||||
| 	XXX_unrecognized     []byte    `json:"-"` | ||||
| 	XXX_sizecache        int32     `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Histogram) Reset()         { *m = Histogram{} } | ||||
| func (m *Histogram) String() string { return proto.CompactTextString(m) } | ||||
| func (*Histogram) ProtoMessage()    {} | ||||
| func (*Histogram) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{6} | ||||
| } | ||||
|  | ||||
| func (m *Histogram) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Histogram.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Histogram) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Histogram.Merge(m, src) | ||||
| } | ||||
| func (m *Histogram) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Histogram.Size(m) | ||||
| } | ||||
| func (m *Histogram) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Histogram.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Histogram proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Histogram) GetSampleCount() uint64 { | ||||
| 	if m != nil && m.SampleCount != nil { | ||||
| 		return *m.SampleCount | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Histogram) GetSampleSum() float64 { | ||||
| 	if m != nil && m.SampleSum != nil { | ||||
| 		return *m.SampleSum | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Histogram) GetBucket() []*Bucket { | ||||
| 	if m != nil { | ||||
| 		return m.Bucket | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type Bucket struct { | ||||
| 	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` | ||||
| 	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` | ||||
| 	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{}  `json:"-"` | ||||
| 	XXX_unrecognized     []byte    `json:"-"` | ||||
| 	XXX_sizecache        int32     `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Bucket) Reset()         { *m = Bucket{} } | ||||
| func (m *Bucket) String() string { return proto.CompactTextString(m) } | ||||
| func (*Bucket) ProtoMessage()    {} | ||||
| func (*Bucket) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{7} | ||||
| } | ||||
|  | ||||
| func (m *Bucket) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Bucket.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Bucket) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Bucket.Merge(m, src) | ||||
| } | ||||
| func (m *Bucket) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Bucket.Size(m) | ||||
| } | ||||
| func (m *Bucket) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Bucket.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Bucket proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Bucket) GetCumulativeCount() uint64 { | ||||
| 	if m != nil && m.CumulativeCount != nil { | ||||
| 		return *m.CumulativeCount | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Bucket) GetUpperBound() float64 { | ||||
| 	if m != nil && m.UpperBound != nil { | ||||
| 		return *m.UpperBound | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Bucket) GetExemplar() *Exemplar { | ||||
| 	if m != nil { | ||||
| 		return m.Exemplar | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type Exemplar struct { | ||||
| 	Label                []*LabelPair         `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` | ||||
| 	Value                *float64             `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` | ||||
| 	Timestamp            *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{}             `json:"-"` | ||||
| 	XXX_unrecognized     []byte               `json:"-"` | ||||
| 	XXX_sizecache        int32                `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Exemplar) Reset()         { *m = Exemplar{} } | ||||
| func (m *Exemplar) String() string { return proto.CompactTextString(m) } | ||||
| func (*Exemplar) ProtoMessage()    {} | ||||
| func (*Exemplar) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{8} | ||||
| } | ||||
|  | ||||
| func (m *Exemplar) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Exemplar.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Exemplar) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Exemplar.Merge(m, src) | ||||
| } | ||||
| func (m *Exemplar) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Exemplar.Size(m) | ||||
| } | ||||
| func (m *Exemplar) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Exemplar.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Exemplar proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Exemplar) GetLabel() []*LabelPair { | ||||
| 	if m != nil { | ||||
| 		return m.Label | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Exemplar) GetValue() float64 { | ||||
| 	if m != nil && m.Value != nil { | ||||
| 		return *m.Value | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { | ||||
| 	if m != nil { | ||||
| 		return m.Timestamp | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type Metric struct { | ||||
| 	Label                []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` | ||||
| 	Gauge                *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` | ||||
| 	Counter              *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` | ||||
| 	Summary              *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` | ||||
| 	Untyped              *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` | ||||
| 	Histogram            *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` | ||||
| 	TimestampMs          *int64       `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{}     `json:"-"` | ||||
| 	XXX_unrecognized     []byte       `json:"-"` | ||||
| 	XXX_sizecache        int32        `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *Metric) Reset()         { *m = Metric{} } | ||||
| func (m *Metric) String() string { return proto.CompactTextString(m) } | ||||
| func (*Metric) ProtoMessage()    {} | ||||
| func (*Metric) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{9} | ||||
| } | ||||
|  | ||||
| func (m *Metric) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_Metric.Unmarshal(m, b) | ||||
| } | ||||
| func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_Metric.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *Metric) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_Metric.Merge(m, src) | ||||
| } | ||||
| func (m *Metric) XXX_Size() int { | ||||
| 	return xxx_messageInfo_Metric.Size(m) | ||||
| } | ||||
| func (m *Metric) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_Metric.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_Metric proto.InternalMessageInfo | ||||
|  | ||||
| func (m *Metric) GetLabel() []*LabelPair { | ||||
| 	if m != nil { | ||||
| 		return m.Label | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Metric) GetGauge() *Gauge { | ||||
| 	if m != nil { | ||||
| 		return m.Gauge | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Metric) GetCounter() *Counter { | ||||
| 	if m != nil { | ||||
| 		return m.Counter | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Metric) GetSummary() *Summary { | ||||
| 	if m != nil { | ||||
| 		return m.Summary | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Metric) GetUntyped() *Untyped { | ||||
| 	if m != nil { | ||||
| 		return m.Untyped | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Metric) GetHistogram() *Histogram { | ||||
| 	if m != nil { | ||||
| 		return m.Histogram | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Metric) GetTimestampMs() int64 { | ||||
| 	if m != nil && m.TimestampMs != nil { | ||||
| 		return *m.TimestampMs | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| type MetricFamily struct { | ||||
| 	Name                 *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` | ||||
| 	Help                 *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` | ||||
| 	Type                 *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` | ||||
| 	Metric               []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` | ||||
| 	XXX_NoUnkeyedLiteral struct{}    `json:"-"` | ||||
| 	XXX_unrecognized     []byte      `json:"-"` | ||||
| 	XXX_sizecache        int32       `json:"-"` | ||||
| } | ||||
|  | ||||
| func (m *MetricFamily) Reset()         { *m = MetricFamily{} } | ||||
| func (m *MetricFamily) String() string { return proto.CompactTextString(m) } | ||||
| func (*MetricFamily) ProtoMessage()    {} | ||||
| func (*MetricFamily) Descriptor() ([]byte, []int) { | ||||
| 	return fileDescriptor_6039342a2ba47b72, []int{10} | ||||
| } | ||||
|  | ||||
| func (m *MetricFamily) XXX_Unmarshal(b []byte) error { | ||||
| 	return xxx_messageInfo_MetricFamily.Unmarshal(m, b) | ||||
| } | ||||
| func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | ||||
| 	return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) | ||||
| } | ||||
| func (m *MetricFamily) XXX_Merge(src proto.Message) { | ||||
| 	xxx_messageInfo_MetricFamily.Merge(m, src) | ||||
| } | ||||
| func (m *MetricFamily) XXX_Size() int { | ||||
| 	return xxx_messageInfo_MetricFamily.Size(m) | ||||
| } | ||||
| func (m *MetricFamily) XXX_DiscardUnknown() { | ||||
| 	xxx_messageInfo_MetricFamily.DiscardUnknown(m) | ||||
| } | ||||
|  | ||||
| var xxx_messageInfo_MetricFamily proto.InternalMessageInfo | ||||
|  | ||||
| func (m *MetricFamily) GetName() string { | ||||
| 	if m != nil && m.Name != nil { | ||||
| 		return *m.Name | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *MetricFamily) GetHelp() string { | ||||
| 	if m != nil && m.Help != nil { | ||||
| 		return *m.Help | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *MetricFamily) GetType() MetricType { | ||||
| 	if m != nil && m.Type != nil { | ||||
| 		return *m.Type | ||||
| 	} | ||||
| 	return MetricType_COUNTER | ||||
| } | ||||
|  | ||||
| func (m *MetricFamily) GetMetric() []*Metric { | ||||
| 	if m != nil { | ||||
| 		return m.Metric | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func init() { | ||||
| 	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) | ||||
| 	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") | ||||
| 	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") | ||||
| 	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") | ||||
| 	proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") | ||||
| 	proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") | ||||
| 	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") | ||||
| 	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") | ||||
| 	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") | ||||
| 	proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") | ||||
| 	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") | ||||
| 	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") | ||||
| } | ||||
|  | ||||
| func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } | ||||
|  | ||||
| var fileDescriptor_6039342a2ba47b72 = []byte{ | ||||
| 	// 665 bytes of a gzipped FileDescriptorProto | ||||
| 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, | ||||
| 	0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, | ||||
| 	0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, | ||||
| 	0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, | ||||
| 	0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, | ||||
| 	0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, | ||||
| 	0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, | ||||
| 	0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, | ||||
| 	0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, | ||||
| 	0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, | ||||
| 	0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, | ||||
| 	0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, | ||||
| 	0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, | ||||
| 	0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, | ||||
| 	0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, | ||||
| 	0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, | ||||
| 	0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, | ||||
| 	0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, | ||||
| 	0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, | ||||
| 	0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, | ||||
| 	0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, | ||||
| 	0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, | ||||
| 	0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, | ||||
| 	0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, | ||||
| 	0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, | ||||
| 	0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, | ||||
| 	0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, | ||||
| 	0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, | ||||
| 	0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, | ||||
| 	0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, | ||||
| 	0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, | ||||
| 	0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, | ||||
| 	0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, | ||||
| 	0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, | ||||
| 	0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, | ||||
| 	0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, | ||||
| 	0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, | ||||
| 	0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, | ||||
| 	0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, | ||||
| 	0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, | ||||
| 	0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, | ||||
| 	0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, | ||||
| } | ||||
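These registrations correspond to the generated Go types in github.com/prometheus/client_model/go, which the expfmt code further down imports as dto. Purely for illustration (not part of this commit), a minimal sketch of assembling one of these messages by hand; field and helper names follow the generated protobuf API:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// A single counter metric family, built from the registered types above.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests served."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
}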
							
								
								
									
vendor/github.com/prometheus/common/LICENSE: 201 lines deleted (generated, vendored)
							| @ -1,201 +0,0 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
vendor/github.com/prometheus/common/NOTICE: 5 lines deleted (generated, vendored)
							| @ -1,5 +0,0 @@ | ||||
| Common libraries shared by Prometheus Go components. | ||||
| Copyright 2015 The Prometheus Authors | ||||
|  | ||||
| This product includes software developed at | ||||
| SoundCloud Ltd. (http://soundcloud.com/). | ||||
							
								
								
									
vendor/github.com/prometheus/common/expfmt/decode.go: 429 lines deleted (generated, vendored)
							| @ -1,429 +0,0 @@ | ||||
| // Copyright 2015 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package expfmt | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"mime" | ||||
| 	"net/http" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
|  | ||||
| 	"github.com/matttproud/golang_protobuf_extensions/pbutil" | ||||
| 	"github.com/prometheus/common/model" | ||||
| ) | ||||
|  | ||||
| // Decoder types decode an input stream into metric families. | ||||
| type Decoder interface { | ||||
| 	Decode(*dto.MetricFamily) error | ||||
| } | ||||
|  | ||||
| // DecodeOptions contains options used by the Decoder and in sample extraction. | ||||
| type DecodeOptions struct { | ||||
| 	// Timestamp is added to each value from the stream that has no explicit timestamp set. | ||||
| 	Timestamp model.Time | ||||
| } | ||||
|  | ||||
| // ResponseFormat extracts the correct format from an HTTP response header. | ||||
| // If no matching format can be found, FmtUnknown is returned. | ||||
| func ResponseFormat(h http.Header) Format { | ||||
| 	ct := h.Get(hdrContentType) | ||||
|  | ||||
| 	mediatype, params, err := mime.ParseMediaType(ct) | ||||
| 	if err != nil { | ||||
| 		return FmtUnknown | ||||
| 	} | ||||
|  | ||||
| 	const textType = "text/plain" | ||||
|  | ||||
| 	switch mediatype { | ||||
| 	case ProtoType: | ||||
| 		if p, ok := params["proto"]; ok && p != ProtoProtocol { | ||||
| 			return FmtUnknown | ||||
| 		} | ||||
| 		if e, ok := params["encoding"]; ok && e != "delimited" { | ||||
| 			return FmtUnknown | ||||
| 		} | ||||
| 		return FmtProtoDelim | ||||
|  | ||||
| 	case textType: | ||||
| 		if v, ok := params["version"]; ok && v != TextVersion { | ||||
| 			return FmtUnknown | ||||
| 		} | ||||
| 		return FmtText | ||||
| 	} | ||||
|  | ||||
| 	return FmtUnknown | ||||
| } | ||||
|  | ||||
| // NewDecoder returns a new decoder based on the given input format. | ||||
| // If the input format does not imply otherwise, a text format decoder is returned. | ||||
| func NewDecoder(r io.Reader, format Format) Decoder { | ||||
| 	switch format { | ||||
| 	case FmtProtoDelim: | ||||
| 		return &protoDecoder{r: r} | ||||
| 	} | ||||
| 	return &textDecoder{r: r} | ||||
| } | ||||
|  | ||||
| // protoDecoder implements the Decoder interface for protocol buffers. | ||||
| type protoDecoder struct { | ||||
| 	r io.Reader | ||||
| } | ||||
|  | ||||
| // Decode implements the Decoder interface. | ||||
| func (d *protoDecoder) Decode(v *dto.MetricFamily) error { | ||||
| 	_, err := pbutil.ReadDelimited(d.r, v) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if !model.IsValidMetricName(model.LabelValue(v.GetName())) { | ||||
| 		return fmt.Errorf("invalid metric name %q", v.GetName()) | ||||
| 	} | ||||
| 	for _, m := range v.GetMetric() { | ||||
| 		if m == nil { | ||||
| 			continue | ||||
| 		} | ||||
| 		for _, l := range m.GetLabel() { | ||||
| 			if l == nil { | ||||
| 				continue | ||||
| 			} | ||||
| 			if !model.LabelValue(l.GetValue()).IsValid() { | ||||
| 				return fmt.Errorf("invalid label value %q", l.GetValue()) | ||||
| 			} | ||||
| 			if !model.LabelName(l.GetName()).IsValid() { | ||||
| 				return fmt.Errorf("invalid label name %q", l.GetName()) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // textDecoder implements the Decoder interface for the text protocol. | ||||
| type textDecoder struct { | ||||
| 	r    io.Reader | ||||
| 	p    TextParser | ||||
| 	fams []*dto.MetricFamily | ||||
| } | ||||
|  | ||||
| // Decode implements the Decoder interface. | ||||
| func (d *textDecoder) Decode(v *dto.MetricFamily) error { | ||||
| 	// TODO(fabxc): Wrap this as a line reader to make streaming safer. | ||||
| 	if len(d.fams) == 0 { | ||||
| 		// No cached metric families, read everything and parse metrics. | ||||
| 		fams, err := d.p.TextToMetricFamilies(d.r) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		if len(fams) == 0 { | ||||
| 			return io.EOF | ||||
| 		} | ||||
| 		d.fams = make([]*dto.MetricFamily, 0, len(fams)) | ||||
| 		for _, f := range fams { | ||||
| 			d.fams = append(d.fams, f) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	*v = *d.fams[0] | ||||
| 	d.fams = d.fams[1:] | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // SampleDecoder wraps a Decoder to extract samples from the metric families | ||||
| // decoded by the wrapped Decoder. | ||||
| type SampleDecoder struct { | ||||
| 	Dec  Decoder | ||||
| 	Opts *DecodeOptions | ||||
|  | ||||
| 	f dto.MetricFamily | ||||
| } | ||||
|  | ||||
| // Decode calls the Decode method of the wrapped Decoder and then extracts the | ||||
| // samples from the decoded MetricFamily into the provided model.Vector. | ||||
| func (sd *SampleDecoder) Decode(s *model.Vector) error { | ||||
| 	err := sd.Dec.Decode(&sd.f) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	*s, err = extractSamples(&sd.f, sd.Opts) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // ExtractSamples builds a slice of samples from the provided metric | ||||
| // families. If an error occurs during sample extraction, it continues to | ||||
| // extract from the remaining metric families. The returned error is the last | ||||
| // error that has occurred. | ||||
| func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { | ||||
| 	var ( | ||||
| 		all     model.Vector | ||||
| 		lastErr error | ||||
| 	) | ||||
| 	for _, f := range fams { | ||||
| 		some, err := extractSamples(f, o) | ||||
| 		if err != nil { | ||||
| 			lastErr = err | ||||
| 			continue | ||||
| 		} | ||||
| 		all = append(all, some...) | ||||
| 	} | ||||
| 	return all, lastErr | ||||
| } | ||||
|  | ||||
| func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { | ||||
| 	switch f.GetType() { | ||||
| 	case dto.MetricType_COUNTER: | ||||
| 		return extractCounter(o, f), nil | ||||
| 	case dto.MetricType_GAUGE: | ||||
| 		return extractGauge(o, f), nil | ||||
| 	case dto.MetricType_SUMMARY: | ||||
| 		return extractSummary(o, f), nil | ||||
| 	case dto.MetricType_UNTYPED: | ||||
| 		return extractUntyped(o, f), nil | ||||
| 	case dto.MetricType_HISTOGRAM: | ||||
| 		return extractHistogram(o, f), nil | ||||
| 	} | ||||
| 	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) | ||||
| } | ||||
|  | ||||
| func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | ||||
| 	samples := make(model.Vector, 0, len(f.Metric)) | ||||
|  | ||||
| 	for _, m := range f.Metric { | ||||
| 		if m.Counter == nil { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		lset := make(model.LabelSet, len(m.Label)+1) | ||||
| 		for _, p := range m.Label { | ||||
| 			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 		} | ||||
| 		lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) | ||||
|  | ||||
| 		smpl := &model.Sample{ | ||||
| 			Metric: model.Metric(lset), | ||||
| 			Value:  model.SampleValue(m.Counter.GetValue()), | ||||
| 		} | ||||
|  | ||||
| 		if m.TimestampMs != nil { | ||||
| 			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | ||||
| 		} else { | ||||
| 			smpl.Timestamp = o.Timestamp | ||||
| 		} | ||||
|  | ||||
| 		samples = append(samples, smpl) | ||||
| 	} | ||||
|  | ||||
| 	return samples | ||||
| } | ||||
|  | ||||
| func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | ||||
| 	samples := make(model.Vector, 0, len(f.Metric)) | ||||
|  | ||||
| 	for _, m := range f.Metric { | ||||
| 		if m.Gauge == nil { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		lset := make(model.LabelSet, len(m.Label)+1) | ||||
| 		for _, p := range m.Label { | ||||
| 			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 		} | ||||
| 		lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) | ||||
|  | ||||
| 		smpl := &model.Sample{ | ||||
| 			Metric: model.Metric(lset), | ||||
| 			Value:  model.SampleValue(m.Gauge.GetValue()), | ||||
| 		} | ||||
|  | ||||
| 		if m.TimestampMs != nil { | ||||
| 			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | ||||
| 		} else { | ||||
| 			smpl.Timestamp = o.Timestamp | ||||
| 		} | ||||
|  | ||||
| 		samples = append(samples, smpl) | ||||
| 	} | ||||
|  | ||||
| 	return samples | ||||
| } | ||||
|  | ||||
| func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | ||||
| 	samples := make(model.Vector, 0, len(f.Metric)) | ||||
|  | ||||
| 	for _, m := range f.Metric { | ||||
| 		if m.Untyped == nil { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		lset := make(model.LabelSet, len(m.Label)+1) | ||||
| 		for _, p := range m.Label { | ||||
| 			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 		} | ||||
| 		lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) | ||||
|  | ||||
| 		smpl := &model.Sample{ | ||||
| 			Metric: model.Metric(lset), | ||||
| 			Value:  model.SampleValue(m.Untyped.GetValue()), | ||||
| 		} | ||||
|  | ||||
| 		if m.TimestampMs != nil { | ||||
| 			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | ||||
| 		} else { | ||||
| 			smpl.Timestamp = o.Timestamp | ||||
| 		} | ||||
|  | ||||
| 		samples = append(samples, smpl) | ||||
| 	} | ||||
|  | ||||
| 	return samples | ||||
| } | ||||
|  | ||||
| func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | ||||
| 	samples := make(model.Vector, 0, len(f.Metric)) | ||||
|  | ||||
| 	for _, m := range f.Metric { | ||||
| 		if m.Summary == nil { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		timestamp := o.Timestamp | ||||
| 		if m.TimestampMs != nil { | ||||
| 			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | ||||
| 		} | ||||
|  | ||||
| 		for _, q := range m.Summary.Quantile { | ||||
| 			lset := make(model.LabelSet, len(m.Label)+2) | ||||
| 			for _, p := range m.Label { | ||||
| 				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 			} | ||||
| 			// BUG(matt): Update other names to "quantile". | ||||
| 			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) | ||||
| 			lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) | ||||
|  | ||||
| 			samples = append(samples, &model.Sample{ | ||||
| 				Metric:    model.Metric(lset), | ||||
| 				Value:     model.SampleValue(q.GetValue()), | ||||
| 				Timestamp: timestamp, | ||||
| 			}) | ||||
| 		} | ||||
|  | ||||
| 		lset := make(model.LabelSet, len(m.Label)+1) | ||||
| 		for _, p := range m.Label { | ||||
| 			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 		} | ||||
| 		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") | ||||
|  | ||||
| 		samples = append(samples, &model.Sample{ | ||||
| 			Metric:    model.Metric(lset), | ||||
| 			Value:     model.SampleValue(m.Summary.GetSampleSum()), | ||||
| 			Timestamp: timestamp, | ||||
| 		}) | ||||
|  | ||||
| 		lset = make(model.LabelSet, len(m.Label)+1) | ||||
| 		for _, p := range m.Label { | ||||
| 			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 		} | ||||
| 		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") | ||||
|  | ||||
| 		samples = append(samples, &model.Sample{ | ||||
| 			Metric:    model.Metric(lset), | ||||
| 			Value:     model.SampleValue(m.Summary.GetSampleCount()), | ||||
| 			Timestamp: timestamp, | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return samples | ||||
| } | ||||
|  | ||||
| func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | ||||
| 	samples := make(model.Vector, 0, len(f.Metric)) | ||||
|  | ||||
| 	for _, m := range f.Metric { | ||||
| 		if m.Histogram == nil { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		timestamp := o.Timestamp | ||||
| 		if m.TimestampMs != nil { | ||||
| 			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | ||||
| 		} | ||||
|  | ||||
| 		infSeen := false | ||||
|  | ||||
| 		for _, q := range m.Histogram.Bucket { | ||||
| 			lset := make(model.LabelSet, len(m.Label)+2) | ||||
| 			for _, p := range m.Label { | ||||
| 				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 			} | ||||
| 			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) | ||||
| 			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") | ||||
|  | ||||
| 			if math.IsInf(q.GetUpperBound(), +1) { | ||||
| 				infSeen = true | ||||
| 			} | ||||
|  | ||||
| 			samples = append(samples, &model.Sample{ | ||||
| 				Metric:    model.Metric(lset), | ||||
| 				Value:     model.SampleValue(q.GetCumulativeCount()), | ||||
| 				Timestamp: timestamp, | ||||
| 			}) | ||||
| 		} | ||||
|  | ||||
| 		lset := make(model.LabelSet, len(m.Label)+1) | ||||
| 		for _, p := range m.Label { | ||||
| 			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 		} | ||||
| 		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") | ||||
|  | ||||
| 		samples = append(samples, &model.Sample{ | ||||
| 			Metric:    model.Metric(lset), | ||||
| 			Value:     model.SampleValue(m.Histogram.GetSampleSum()), | ||||
| 			Timestamp: timestamp, | ||||
| 		}) | ||||
|  | ||||
| 		lset = make(model.LabelSet, len(m.Label)+1) | ||||
| 		for _, p := range m.Label { | ||||
| 			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 		} | ||||
| 		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") | ||||
|  | ||||
| 		count := &model.Sample{ | ||||
| 			Metric:    model.Metric(lset), | ||||
| 			Value:     model.SampleValue(m.Histogram.GetSampleCount()), | ||||
| 			Timestamp: timestamp, | ||||
| 		} | ||||
| 		samples = append(samples, count) | ||||
|  | ||||
| 		if !infSeen { | ||||
| 			// Append an infinity bucket sample. | ||||
| 			lset := make(model.LabelSet, len(m.Label)+2) | ||||
| 			for _, p := range m.Label { | ||||
| 				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | ||||
| 			} | ||||
| 			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") | ||||
| 			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") | ||||
|  | ||||
| 			samples = append(samples, &model.Sample{ | ||||
| 				Metric:    model.Metric(lset), | ||||
| 				Value:     count.Value, | ||||
| 				Timestamp: timestamp, | ||||
| 			}) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return samples | ||||
| } | ||||
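Not part of the diff, but for orientation: the decoder above is consumed roughly as sketched below. The scrape URL is hypothetical; ResponseFormat, NewDecoder, SampleDecoder and DecodeOptions are the symbols defined in the deleted file.

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical scrape target; any Prometheus-style /metrics endpoint works.
	resp, err := http.Get("http://localhost:9100/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Pick the wire format from the Content-Type header, then wrap the raw
	// Decoder in a SampleDecoder to obtain model.Vector samples.
	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))
	sdec := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	for {
		var samples model.Vector
		if err := sdec.Decode(&samples); err != nil {
			break // io.EOF once every metric family has been consumed
		}
		for _, s := range samples {
			fmt.Println(s.Metric, s.Value)
		}
	}
}

Decode returns io.EOF once the underlying stream is exhausted, so the loop simply breaks on the first error.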
							
								
								
									
vendor/github.com/prometheus/common/expfmt/encode.go: 162 lines deleted (generated, vendored)
							| @ -1,162 +0,0 @@ | ||||
| // Copyright 2015 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package expfmt | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
|  | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/matttproud/golang_protobuf_extensions/pbutil" | ||||
| 	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // Encoder types encode metric families into an underlying wire protocol. | ||||
| type Encoder interface { | ||||
| 	Encode(*dto.MetricFamily) error | ||||
| } | ||||
|  | ||||
| // Closer is implemented by Encoders that need to be closed to finalize | ||||
| // encoding. (For example, OpenMetrics needs a final `# EOF` line.) | ||||
| // | ||||
| // Note that all Encoder implementations returned from this package implement | ||||
| // Closer, too, even if the Close call is a no-op. This happens in preparation | ||||
| // for adding a Close method to the Encoder interface directly in a (mildly | ||||
| // breaking) release in the future. | ||||
| type Closer interface { | ||||
| 	Close() error | ||||
| } | ||||
|  | ||||
| type encoderCloser struct { | ||||
| 	encode func(*dto.MetricFamily) error | ||||
| 	close  func() error | ||||
| } | ||||
|  | ||||
| func (ec encoderCloser) Encode(v *dto.MetricFamily) error { | ||||
| 	return ec.encode(v) | ||||
| } | ||||
|  | ||||
| func (ec encoderCloser) Close() error { | ||||
| 	return ec.close() | ||||
| } | ||||
|  | ||||
| // Negotiate returns the Content-Type based on the given Accept header. If no | ||||
| // appropriate accepted type is found, FmtText is returned (which is the | ||||
| // Prometheus text format). This function will never negotiate FmtOpenMetrics, | ||||
| // as the support is still experimental. To include the option to negotiate | ||||
| // FmtOpenMetrics, use NegotiateOpenMetrics. | ||||
| func Negotiate(h http.Header) Format { | ||||
| 	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { | ||||
| 		ver := ac.Params["version"] | ||||
| 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { | ||||
| 			switch ac.Params["encoding"] { | ||||
| 			case "delimited": | ||||
| 				return FmtProtoDelim | ||||
| 			case "text": | ||||
| 				return FmtProtoText | ||||
| 			case "compact-text": | ||||
| 				return FmtProtoCompact | ||||
| 			} | ||||
| 		} | ||||
| 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { | ||||
| 			return FmtText | ||||
| 		} | ||||
| 	} | ||||
| 	return FmtText | ||||
| } | ||||
|  | ||||
| // NegotiateIncludingOpenMetrics works like Negotiate but includes | ||||
| // FmtOpenMetrics as an option for the result. Note that this function is | ||||
| // temporary and will disappear once FmtOpenMetrics is fully supported and as | ||||
| // such may be negotiated by the normal Negotiate function. | ||||
| func NegotiateIncludingOpenMetrics(h http.Header) Format { | ||||
| 	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { | ||||
| 		ver := ac.Params["version"] | ||||
| 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { | ||||
| 			switch ac.Params["encoding"] { | ||||
| 			case "delimited": | ||||
| 				return FmtProtoDelim | ||||
| 			case "text": | ||||
| 				return FmtProtoText | ||||
| 			case "compact-text": | ||||
| 				return FmtProtoCompact | ||||
| 			} | ||||
| 		} | ||||
| 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { | ||||
| 			return FmtText | ||||
| 		} | ||||
| 		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { | ||||
| 			return FmtOpenMetrics | ||||
| 		} | ||||
| 	} | ||||
| 	return FmtText | ||||
| } | ||||
|  | ||||
| // NewEncoder returns a new encoder based on content type negotiation. All | ||||
| // Encoder implementations returned by NewEncoder also implement Closer, and | ||||
| // callers should always call the Close method. It is currently only required | ||||
| // for FmtOpenMetrics, but a future (breaking) release will add the Close method | ||||
| // to the Encoder interface directly. The current version of the Encoder | ||||
| // interface is kept for backwards compatibility. | ||||
| func NewEncoder(w io.Writer, format Format) Encoder { | ||||
| 	switch format { | ||||
| 	case FmtProtoDelim: | ||||
| 		return encoderCloser{ | ||||
| 			encode: func(v *dto.MetricFamily) error { | ||||
| 				_, err := pbutil.WriteDelimited(w, v) | ||||
| 				return err | ||||
| 			}, | ||||
| 			close: func() error { return nil }, | ||||
| 		} | ||||
| 	case FmtProtoCompact: | ||||
| 		return encoderCloser{ | ||||
| 			encode: func(v *dto.MetricFamily) error { | ||||
| 				_, err := fmt.Fprintln(w, v.String()) | ||||
| 				return err | ||||
| 			}, | ||||
| 			close: func() error { return nil }, | ||||
| 		} | ||||
| 	case FmtProtoText: | ||||
| 		return encoderCloser{ | ||||
| 			encode: func(v *dto.MetricFamily) error { | ||||
| 				_, err := fmt.Fprintln(w, proto.MarshalTextString(v)) | ||||
| 				return err | ||||
| 			}, | ||||
| 			close: func() error { return nil }, | ||||
| 		} | ||||
| 	case FmtText: | ||||
| 		return encoderCloser{ | ||||
| 			encode: func(v *dto.MetricFamily) error { | ||||
| 				_, err := MetricFamilyToText(w, v) | ||||
| 				return err | ||||
| 			}, | ||||
| 			close: func() error { return nil }, | ||||
| 		} | ||||
| 	case FmtOpenMetrics: | ||||
| 		return encoderCloser{ | ||||
| 			encode: func(v *dto.MetricFamily) error { | ||||
| 				_, err := MetricFamilyToOpenMetrics(w, v) | ||||
| 				return err | ||||
| 			}, | ||||
| 			close: func() error { | ||||
| 				_, err := FinalizeOpenMetrics(w) | ||||
| 				return err | ||||
| 			}, | ||||
| 		} | ||||
| 	} | ||||
| 	panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) | ||||
| } | ||||
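Again for context only: a sketch of how the negotiation and encoder above are typically wired into an HTTP handler. The families slice is a stand-in for whatever gatherer the caller actually uses; Negotiate, NewEncoder and Closer are the symbols from the deleted file.

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// serveMetrics writes the given metric families in whatever format the
// client's Accept header negotiates to.
func serveMetrics(families []*dto.MetricFamily) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		format := expfmt.Negotiate(r.Header)
		w.Header().Set("Content-Type", string(format))

		enc := expfmt.NewEncoder(w, format)
		for _, mf := range families {
			if err := enc.Encode(mf); err != nil {
				// Best effort: stop writing on the first encoding error.
				return
			}
		}
		// Every encoder returned by NewEncoder also implements Closer.
		if c, ok := enc.(expfmt.Closer); ok {
			_ = c.Close()
		}
	})
}

func main() {
	http.Handle("/metrics", serveMetrics(nil))
	_ = http.ListenAndServe(":8080", nil)
}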
							
								
								
									
vendor/github.com/prometheus/common/expfmt/expfmt.go: 41 lines deleted (generated, vendored)
							| @ -1,41 +0,0 @@ | ||||
| // Copyright 2015 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // Package expfmt contains tools for reading and writing Prometheus metrics. | ||||
| package expfmt | ||||
|  | ||||
| // Format specifies the HTTP content type of the different wire protocols. | ||||
| type Format string | ||||
|  | ||||
| // Constants to assemble the Content-Type values for the different wire protocols. | ||||
| const ( | ||||
| 	TextVersion        = "0.0.4" | ||||
| 	ProtoType          = `application/vnd.google.protobuf` | ||||
| 	ProtoProtocol      = `io.prometheus.client.MetricFamily` | ||||
| 	ProtoFmt           = ProtoType + "; proto=" + ProtoProtocol + ";" | ||||
| 	OpenMetricsType    = `application/openmetrics-text` | ||||
| 	OpenMetricsVersion = "0.0.1" | ||||
|  | ||||
| 	// The Content-Type values for the different wire protocols. | ||||
| 	FmtUnknown      Format = `<unknown>` | ||||
| 	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8` | ||||
| 	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited` | ||||
| 	FmtProtoText    Format = ProtoFmt + ` encoding=text` | ||||
| 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` | ||||
| 	FmtOpenMetrics  Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	hdrContentType = "Content-Type" | ||||
| 	hdrAccept      = "Accept" | ||||
| ) | ||||
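A tiny illustration (not part of the commit) of how the constants above are used: ResponseFormat maps a response's Content-Type onto one of the Format values, so the version 0.0.4 text exposition resolves to FmtText.

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}
	h.Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")

	// Prints "true": the header matches the FmtText constant above.
	fmt.Println(expfmt.ResponseFormat(h) == expfmt.FmtText)
}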
							
								
								
									
vendor/github.com/prometheus/common/expfmt/fuzz.go: 36 lines deleted (generated, vendored)
							| @ -1,36 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| // Build only when actually fuzzing | ||||
| // +build gofuzz | ||||
|  | ||||
| package expfmt | ||||
|  | ||||
| import "bytes" | ||||
|  | ||||
| // Fuzz text metric parser with github.com/dvyukov/go-fuzz: | ||||
| // | ||||
| //     go-fuzz-build github.com/prometheus/common/expfmt | ||||
| //     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz | ||||
| // | ||||
| // Further input samples should go in the folder fuzz/corpus. | ||||
| func Fuzz(in []byte) int { | ||||
| 	parser := TextParser{} | ||||
| 	_, err := parser.TextToMetricFamilies(bytes.NewReader(in)) | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	return 1 | ||||
| } | ||||
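The Fuzz target above simply drives TextParser.TextToMetricFamilies. For reference (illustrative input, not from this repository), calling the parser directly looks like this:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := "# TYPE http_requests_total counter\n" +
		"http_requests_total{code=\"200\"} 42\n"

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	// families is keyed by metric family name.
	for name, mf := range families {
		fmt.Println(name, mf.GetType(), len(mf.GetMetric()))
	}
}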
							
								
								
									
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go: 527 lines deleted (generated, vendored)
							| @ -1,527 +0,0 @@ | ||||
| // Copyright 2020 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package expfmt | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/golang/protobuf/ptypes" | ||||
| 	"github.com/prometheus/common/model" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the | ||||
| // OpenMetrics text format and writes the resulting lines to 'out'. It returns | ||||
| // the number of bytes written and any error encountered. The output will have | ||||
| // the same order as the input, no further sorting is performed. Furthermore, | ||||
| // this function assumes the input is already sanitized and does not perform any | ||||
| // sanity checks. If the input contains duplicate metrics or invalid metric or | ||||
| // label names, the conversion will result in invalid text format output. | ||||
| // | ||||
| // This function fulfills the type 'expfmt.encoder'. | ||||
| // | ||||
| // Note that OpenMetrics requires a final `# EOF` line. Since this function acts | ||||
| // on individual metric families, it is the responsibility of the caller to | ||||
| // append this line to 'out' once all metric families have been written. | ||||
| // Conveniently, this can be done by calling FinalizeOpenMetrics. | ||||
| // | ||||
| // The output should be fully OpenMetrics compliant. However, there are a few | ||||
| // missing features and peculiarities to avoid complications when switching from | ||||
| // Prometheus to OpenMetrics or vice versa: | ||||
| // | ||||
| // - Counters are expected to have the `_total` suffix in their metric name. In | ||||
| //   the output, the suffix will be truncated from the `# TYPE` and `# HELP` | ||||
| //   line. A counter with a missing `_total` suffix is not an error. However, | ||||
| //   its type will be set to `unknown` in that case to avoid invalid OpenMetrics | ||||
| //   output. | ||||
| // | ||||
| // - No support for the following (optional) features: `# UNIT` line, `_created` | ||||
| //   line, info type, stateset type, gaugehistogram type. | ||||
| // | ||||
| // - The size of exemplar labels is not checked (i.e. it's possible to create | ||||
| //   exemplars that are larger than allowed by the OpenMetrics specification). | ||||
| // | ||||
| // - The value of Counters is not checked. (OpenMetrics doesn't allow counters | ||||
| //   with a `NaN` value.) | ||||
| func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { | ||||
| 	name := in.GetName() | ||||
| 	if name == "" { | ||||
| 		return 0, fmt.Errorf("MetricFamily has no name: %s", in) | ||||
| 	} | ||||
|  | ||||
| 	// Try the interface upgrade. If it doesn't work, we'll use a | ||||
| 	// bufio.Writer from the sync.Pool. | ||||
| 	w, ok := out.(enhancedWriter) | ||||
| 	if !ok { | ||||
| 		b := bufPool.Get().(*bufio.Writer) | ||||
| 		b.Reset(out) | ||||
| 		w = b | ||||
| 		defer func() { | ||||
| 			bErr := b.Flush() | ||||
| 			if err == nil { | ||||
| 				err = bErr | ||||
| 			} | ||||
| 			bufPool.Put(b) | ||||
| 		}() | ||||
| 	} | ||||
|  | ||||
| 	var ( | ||||
| 		n          int | ||||
| 		metricType = in.GetType() | ||||
| 		shortName  = name | ||||
| 	) | ||||
| 	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { | ||||
| 		shortName = name[:len(name)-6] | ||||
| 	} | ||||
|  | ||||
| 	// Comments, first HELP, then TYPE. | ||||
| 	if in.Help != nil { | ||||
| 		n, err = w.WriteString("# HELP ") | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		n, err = w.WriteString(shortName) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		err = w.WriteByte(' ') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		n, err = writeEscapedString(w, *in.Help, true) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		err = w.WriteByte('\n') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	n, err = w.WriteString("# TYPE ") | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	n, err = w.WriteString(shortName) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	switch metricType { | ||||
| 	case dto.MetricType_COUNTER: | ||||
| 		if strings.HasSuffix(name, "_total") { | ||||
| 			n, err = w.WriteString(" counter\n") | ||||
| 		} else { | ||||
| 			n, err = w.WriteString(" unknown\n") | ||||
| 		} | ||||
| 	case dto.MetricType_GAUGE: | ||||
| 		n, err = w.WriteString(" gauge\n") | ||||
| 	case dto.MetricType_SUMMARY: | ||||
| 		n, err = w.WriteString(" summary\n") | ||||
| 	case dto.MetricType_UNTYPED: | ||||
| 		n, err = w.WriteString(" unknown\n") | ||||
| 	case dto.MetricType_HISTOGRAM: | ||||
| 		n, err = w.WriteString(" histogram\n") | ||||
| 	default: | ||||
| 		return written, fmt.Errorf("unknown metric type %s", metricType.String()) | ||||
| 	} | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Finally the samples, one line for each. | ||||
| 	for _, metric := range in.Metric { | ||||
| 		switch metricType { | ||||
| 		case dto.MetricType_COUNTER: | ||||
| 			if metric.Counter == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected counter in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			// Note that we have ensured above that either the name | ||||
| 			// ends on `_total` or that the rendered type is | ||||
| 			// `unknown`. Therefore, no `_total` must be added here. | ||||
| 			n, err = writeOpenMetricsSample( | ||||
| 				w, name, "", metric, "", 0, | ||||
| 				metric.Counter.GetValue(), 0, false, | ||||
| 				metric.Counter.Exemplar, | ||||
| 			) | ||||
| 		case dto.MetricType_GAUGE: | ||||
| 			if metric.Gauge == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected gauge in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			n, err = writeOpenMetricsSample( | ||||
| 				w, name, "", metric, "", 0, | ||||
| 				metric.Gauge.GetValue(), 0, false, | ||||
| 				nil, | ||||
| 			) | ||||
| 		case dto.MetricType_UNTYPED: | ||||
| 			if metric.Untyped == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected untyped in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			n, err = writeOpenMetricsSample( | ||||
| 				w, name, "", metric, "", 0, | ||||
| 				metric.Untyped.GetValue(), 0, false, | ||||
| 				nil, | ||||
| 			) | ||||
| 		case dto.MetricType_SUMMARY: | ||||
| 			if metric.Summary == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected summary in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			for _, q := range metric.Summary.Quantile { | ||||
| 				n, err = writeOpenMetricsSample( | ||||
| 					w, name, "", metric, | ||||
| 					model.QuantileLabel, q.GetQuantile(), | ||||
| 					q.GetValue(), 0, false, | ||||
| 					nil, | ||||
| 				) | ||||
| 				written += n | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			n, err = writeOpenMetricsSample( | ||||
| 				w, name, "_sum", metric, "", 0, | ||||
| 				metric.Summary.GetSampleSum(), 0, false, | ||||
| 				nil, | ||||
| 			) | ||||
| 			written += n | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
| 			n, err = writeOpenMetricsSample( | ||||
| 				w, name, "_count", metric, "", 0, | ||||
| 				0, metric.Summary.GetSampleCount(), true, | ||||
| 				nil, | ||||
| 			) | ||||
| 		case dto.MetricType_HISTOGRAM: | ||||
| 			if metric.Histogram == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected histogram in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			infSeen := false | ||||
| 			for _, b := range metric.Histogram.Bucket { | ||||
| 				n, err = writeOpenMetricsSample( | ||||
| 					w, name, "_bucket", metric, | ||||
| 					model.BucketLabel, b.GetUpperBound(), | ||||
| 					0, b.GetCumulativeCount(), true, | ||||
| 					b.Exemplar, | ||||
| 				) | ||||
| 				written += n | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 				if math.IsInf(b.GetUpperBound(), +1) { | ||||
| 					infSeen = true | ||||
| 				} | ||||
| 			} | ||||
| 			if !infSeen { | ||||
| 				n, err = writeOpenMetricsSample( | ||||
| 					w, name, "_bucket", metric, | ||||
| 					model.BucketLabel, math.Inf(+1), | ||||
| 					0, metric.Histogram.GetSampleCount(), true, | ||||
| 					nil, | ||||
| 				) | ||||
| 				written += n | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			n, err = writeOpenMetricsSample( | ||||
| 				w, name, "_sum", metric, "", 0, | ||||
| 				metric.Histogram.GetSampleSum(), 0, false, | ||||
| 				nil, | ||||
| 			) | ||||
| 			written += n | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
| 			n, err = writeOpenMetricsSample( | ||||
| 				w, name, "_count", metric, "", 0, | ||||
| 				0, metric.Histogram.GetSampleCount(), true, | ||||
| 				nil, | ||||
| 			) | ||||
| 		default: | ||||
| 			return written, fmt.Errorf( | ||||
| 				"unexpected type in metric %s %s", name, metric, | ||||
| 			) | ||||
| 		} | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. | ||||
| func FinalizeOpenMetrics(w io.Writer) (written int, err error) { | ||||
| 	return w.Write([]byte("# EOF\n")) | ||||
| } | ||||
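Taken together (illustrative values only): each family is rendered with MetricFamilyToOpenMetrics, and the stream is then terminated with FinalizeOpenMetrics, which emits the `# EOF` line the format requires.

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests served."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(1027)},
		}},
	}

	// Render the family, then close the exposition with "# EOF".
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
		panic(err)
	}
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		panic(err)
	}
}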
|  | ||||
| // writeOpenMetricsSample writes a single sample in OpenMetrics text format to | ||||
| // w, given the metric name, the metric proto message itself, optionally an | ||||
| // additional label name with a float64 value (use empty string as label name if | ||||
| // not required), the value (optionally as float64 or uint64, determined by | ||||
| // useIntValue), and optionally an exemplar (use nil if not required). The | ||||
| // function returns the number of bytes written and any error encountered. | ||||
| func writeOpenMetricsSample( | ||||
| 	w enhancedWriter, | ||||
| 	name, suffix string, | ||||
| 	metric *dto.Metric, | ||||
| 	additionalLabelName string, additionalLabelValue float64, | ||||
| 	floatValue float64, intValue uint64, useIntValue bool, | ||||
| 	exemplar *dto.Exemplar, | ||||
| ) (int, error) { | ||||
| 	var written int | ||||
| 	n, err := w.WriteString(name) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	if suffix != "" { | ||||
| 		n, err = w.WriteString(suffix) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	n, err = writeOpenMetricsLabelPairs( | ||||
| 		w, metric.Label, additionalLabelName, additionalLabelValue, | ||||
| 	) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	err = w.WriteByte(' ') | ||||
| 	written++ | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	if useIntValue { | ||||
| 		n, err = writeUint(w, intValue) | ||||
| 	} else { | ||||
| 		n, err = writeOpenMetricsFloat(w, floatValue) | ||||
| 	} | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	if metric.TimestampMs != nil { | ||||
| 		err = w.WriteByte(' ') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		// TODO(beorn7): Format this directly without converting to a float first. | ||||
| 		n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	if exemplar != nil { | ||||
| 		n, err = writeExemplar(w, exemplar) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	err = w.WriteByte('\n') | ||||
| 	written++ | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	return written, nil | ||||
| } | ||||
|  | ||||
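For orientation, the line this helper emits has the shape name[suffix]{labels} value [timestamp] [# exemplar]; the timestamp, when present, is printed in seconds, and the exemplar follows after " # ". The metric names and values below are purely illustrative, not output of this repository:

    acme_http_request_duration_seconds_bucket{le="0.5"} 1445 # {trace_id="3b1a"} 0.29 1612345678.5
    acme_http_requests_total{code="200"} 1027.0 1612345678.0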
| // writeOpenMetricsLabelPairs works like writeLabelPairs but formats the | ||||
| // additional label value as an OpenMetrics float. | ||||
| func writeOpenMetricsLabelPairs( | ||||
| 	w enhancedWriter, | ||||
| 	in []*dto.LabelPair, | ||||
| 	additionalLabelName string, additionalLabelValue float64, | ||||
| ) (int, error) { | ||||
| 	if len(in) == 0 && additionalLabelName == "" { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	var ( | ||||
| 		written   int | ||||
| 		separator byte = '{' | ||||
| 	) | ||||
| 	for _, lp := range in { | ||||
| 		err := w.WriteByte(separator) | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err := w.WriteString(lp.GetName()) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = w.WriteString(`="`) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = writeEscapedString(w, lp.GetValue(), true) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		err = w.WriteByte('"') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		separator = ',' | ||||
| 	} | ||||
| 	if additionalLabelName != "" { | ||||
| 		err := w.WriteByte(separator) | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err := w.WriteString(additionalLabelName) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = w.WriteString(`="`) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = writeOpenMetricsFloat(w, additionalLabelValue) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		err = w.WriteByte('"') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	err := w.WriteByte('}') | ||||
| 	written++ | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	return written, nil | ||||
| } | ||||
|  | ||||
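Rendered label blocks look like the following (label names are illustrative). With no labels and no additional label, nothing is written at all; otherwise the additional label, such as le or quantile, is appended last and its float value is formatted OpenMetrics-style (so 0.5 stays "0.5" and 1 becomes "1.0"):

    {method="post",code="200",le="0.5"}
    {method="post",code="200",quantile="1.0"}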
| // writeExemplar writes the provided exemplar in OpenMetrics format to w. The | ||||
| // function returns the number of bytes written and any error encountered. | ||||
| func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { | ||||
| 	written := 0 | ||||
| 	n, err := w.WriteString(" # ") | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	err = w.WriteByte(' ') | ||||
| 	written++ | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	n, err = writeOpenMetricsFloat(w, e.GetValue()) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	if e.Timestamp != nil { | ||||
| 		err = w.WriteByte(' ') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		ts, err := ptypes.Timestamp((*e).Timestamp) | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		// TODO(beorn7): Format this directly from components of ts to | ||||
| 		// avoid overflow/underflow and precision issues of the float | ||||
| 		// conversion. | ||||
| 		n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	return written, nil | ||||
| } | ||||
|  | ||||
| // writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting | ||||
| // number would otherwise contain neither a "." nor an "e". | ||||
| func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { | ||||
| 	switch { | ||||
| 	case f == 1: | ||||
| 		return w.WriteString("1.0") | ||||
| 	case f == 0: | ||||
| 		return w.WriteString("0.0") | ||||
| 	case f == -1: | ||||
| 		return w.WriteString("-1.0") | ||||
| 	case math.IsNaN(f): | ||||
| 		return w.WriteString("NaN") | ||||
| 	case math.IsInf(f, +1): | ||||
| 		return w.WriteString("+Inf") | ||||
| 	case math.IsInf(f, -1): | ||||
| 		return w.WriteString("-Inf") | ||||
| 	default: | ||||
| 		bp := numBufPool.Get().(*[]byte) | ||||
| 		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) | ||||
| 		if !bytes.ContainsAny(*bp, "e.") { | ||||
| 			*bp = append(*bp, '.', '0') | ||||
| 		} | ||||
| 		written, err := w.Write(*bp) | ||||
| 		numBufPool.Put(bp) | ||||
| 		return written, err | ||||
| 	} | ||||
| } | ||||
|  | ||||
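A few illustrative inputs and outputs for this formatting, derived from the cases above:

    1     -> "1.0"
    0     -> "0.0"
    -1    -> "-1.0"
    2.5   -> "2.5"
    3     -> "3.0"    (".0" appended because "3" contains neither '.' nor 'e')
    1e-9  -> "1e-09"
    NaN   -> "NaN"
    +Inf  -> "+Inf"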
| // writeUint works like writeInt but for uint64. | ||||
| func writeUint(w enhancedWriter, u uint64) (int, error) { | ||||
| 	bp := numBufPool.Get().(*[]byte) | ||||
| 	*bp = strconv.AppendUint((*bp)[:0], u, 10) | ||||
| 	written, err := w.Write(*bp) | ||||
| 	numBufPool.Put(bp) | ||||
| 	return written, err | ||||
| } | ||||
							
								
								
									
465  vendor/github.com/prometheus/common/expfmt/text_create.go  (generated, vendored)
							| @ -1,465 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package expfmt | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"math" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/prometheus/common/model" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
| ) | ||||
|  | ||||
| // enhancedWriter has all the enhanced write functions needed here. bufio.Writer | ||||
| // implements it. | ||||
| type enhancedWriter interface { | ||||
| 	io.Writer | ||||
| 	WriteRune(r rune) (n int, err error) | ||||
| 	WriteString(s string) (n int, err error) | ||||
| 	WriteByte(c byte) error | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	initialNumBufSize = 24 | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	bufPool = sync.Pool{ | ||||
| 		New: func() interface{} { | ||||
| 			return bufio.NewWriter(ioutil.Discard) | ||||
| 		}, | ||||
| 	} | ||||
| 	numBufPool = sync.Pool{ | ||||
| 		New: func() interface{} { | ||||
| 			b := make([]byte, 0, initialNumBufSize) | ||||
| 			return &b | ||||
| 		}, | ||||
| 	} | ||||
| ) | ||||
|  | ||||
| // MetricFamilyToText converts a MetricFamily proto message into text format and | ||||
| // writes the resulting lines to 'out'. It returns the number of bytes written | ||||
| // and any error encountered. The output has the same order as the input; | ||||
| // no further sorting is performed. Furthermore, this function assumes the input | ||||
| // is already sanitized and does not perform any sanity checks. If the input | ||||
| // contains duplicate metrics or invalid metric or label names, the conversion | ||||
| // will result in invalid text format output. | ||||
| // | ||||
| // This method fulfills the type 'prometheus.encoder'. | ||||
| func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { | ||||
| 	// Fail-fast checks. | ||||
| 	if len(in.Metric) == 0 { | ||||
| 		return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) | ||||
| 	} | ||||
| 	name := in.GetName() | ||||
| 	if name == "" { | ||||
| 		return 0, fmt.Errorf("MetricFamily has no name: %s", in) | ||||
| 	} | ||||
|  | ||||
| 	// Try the interface upgrade. If it doesn't work, we'll use a | ||||
| 	// bufio.Writer from the sync.Pool. | ||||
| 	w, ok := out.(enhancedWriter) | ||||
| 	if !ok { | ||||
| 		b := bufPool.Get().(*bufio.Writer) | ||||
| 		b.Reset(out) | ||||
| 		w = b | ||||
| 		defer func() { | ||||
| 			bErr := b.Flush() | ||||
| 			if err == nil { | ||||
| 				err = bErr | ||||
| 			} | ||||
| 			bufPool.Put(b) | ||||
| 		}() | ||||
| 	} | ||||
|  | ||||
| 	var n int | ||||
|  | ||||
| 	// Comments, first HELP, then TYPE. | ||||
| 	if in.Help != nil { | ||||
| 		n, err = w.WriteString("# HELP ") | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		n, err = w.WriteString(name) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		err = w.WriteByte(' ') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		n, err = writeEscapedString(w, *in.Help, false) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		err = w.WriteByte('\n') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	n, err = w.WriteString("# TYPE ") | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	n, err = w.WriteString(name) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	metricType := in.GetType() | ||||
| 	switch metricType { | ||||
| 	case dto.MetricType_COUNTER: | ||||
| 		n, err = w.WriteString(" counter\n") | ||||
| 	case dto.MetricType_GAUGE: | ||||
| 		n, err = w.WriteString(" gauge\n") | ||||
| 	case dto.MetricType_SUMMARY: | ||||
| 		n, err = w.WriteString(" summary\n") | ||||
| 	case dto.MetricType_UNTYPED: | ||||
| 		n, err = w.WriteString(" untyped\n") | ||||
| 	case dto.MetricType_HISTOGRAM: | ||||
| 		n, err = w.WriteString(" histogram\n") | ||||
| 	default: | ||||
| 		return written, fmt.Errorf("unknown metric type %s", metricType.String()) | ||||
| 	} | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Finally the samples, one line for each. | ||||
| 	for _, metric := range in.Metric { | ||||
| 		switch metricType { | ||||
| 		case dto.MetricType_COUNTER: | ||||
| 			if metric.Counter == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected counter in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			n, err = writeSample( | ||||
| 				w, name, "", metric, "", 0, | ||||
| 				metric.Counter.GetValue(), | ||||
| 			) | ||||
| 		case dto.MetricType_GAUGE: | ||||
| 			if metric.Gauge == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected gauge in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			n, err = writeSample( | ||||
| 				w, name, "", metric, "", 0, | ||||
| 				metric.Gauge.GetValue(), | ||||
| 			) | ||||
| 		case dto.MetricType_UNTYPED: | ||||
| 			if metric.Untyped == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected untyped in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			n, err = writeSample( | ||||
| 				w, name, "", metric, "", 0, | ||||
| 				metric.Untyped.GetValue(), | ||||
| 			) | ||||
| 		case dto.MetricType_SUMMARY: | ||||
| 			if metric.Summary == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected summary in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			for _, q := range metric.Summary.Quantile { | ||||
| 				n, err = writeSample( | ||||
| 					w, name, "", metric, | ||||
| 					model.QuantileLabel, q.GetQuantile(), | ||||
| 					q.GetValue(), | ||||
| 				) | ||||
| 				written += n | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			n, err = writeSample( | ||||
| 				w, name, "_sum", metric, "", 0, | ||||
| 				metric.Summary.GetSampleSum(), | ||||
| 			) | ||||
| 			written += n | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
| 			n, err = writeSample( | ||||
| 				w, name, "_count", metric, "", 0, | ||||
| 				float64(metric.Summary.GetSampleCount()), | ||||
| 			) | ||||
| 		case dto.MetricType_HISTOGRAM: | ||||
| 			if metric.Histogram == nil { | ||||
| 				return written, fmt.Errorf( | ||||
| 					"expected histogram in metric %s %s", name, metric, | ||||
| 				) | ||||
| 			} | ||||
| 			infSeen := false | ||||
| 			for _, b := range metric.Histogram.Bucket { | ||||
| 				n, err = writeSample( | ||||
| 					w, name, "_bucket", metric, | ||||
| 					model.BucketLabel, b.GetUpperBound(), | ||||
| 					float64(b.GetCumulativeCount()), | ||||
| 				) | ||||
| 				written += n | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 				if math.IsInf(b.GetUpperBound(), +1) { | ||||
| 					infSeen = true | ||||
| 				} | ||||
| 			} | ||||
| 			if !infSeen { | ||||
| 				n, err = writeSample( | ||||
| 					w, name, "_bucket", metric, | ||||
| 					model.BucketLabel, math.Inf(+1), | ||||
| 					float64(metric.Histogram.GetSampleCount()), | ||||
| 				) | ||||
| 				written += n | ||||
| 				if err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			n, err = writeSample( | ||||
| 				w, name, "_sum", metric, "", 0, | ||||
| 				metric.Histogram.GetSampleSum(), | ||||
| 			) | ||||
| 			written += n | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
| 			n, err = writeSample( | ||||
| 				w, name, "_count", metric, "", 0, | ||||
| 				float64(metric.Histogram.GetSampleCount()), | ||||
| 			) | ||||
| 		default: | ||||
| 			return written, fmt.Errorf( | ||||
| 				"unexpected type in metric %s %s", name, metric, | ||||
| 			) | ||||
| 		} | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
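A minimal usage sketch (the metric family below is invented for illustration; it assumes the vendored github.com/prometheus/client_model/go and github.com/golang/protobuf/proto packages that appear elsewhere in this diff):

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(1027)},
		}},
	}
	// Writes:
	// # HELP http_requests_total Total number of HTTP requests.
	// # TYPE http_requests_total counter
	// http_requests_total{code="200"} 1027
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}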
| // writeSample writes a single sample in text format to w, given the metric | ||||
| // name, the metric proto message itself, optionally an additional label name | ||||
| // with a float64 value (use empty string as label name if not required), and | ||||
| // the value. The function returns the number of bytes written and any error | ||||
| // encountered. | ||||
| func writeSample( | ||||
| 	w enhancedWriter, | ||||
| 	name, suffix string, | ||||
| 	metric *dto.Metric, | ||||
| 	additionalLabelName string, additionalLabelValue float64, | ||||
| 	value float64, | ||||
| ) (int, error) { | ||||
| 	var written int | ||||
| 	n, err := w.WriteString(name) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	if suffix != "" { | ||||
| 		n, err = w.WriteString(suffix) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	n, err = writeLabelPairs( | ||||
| 		w, metric.Label, additionalLabelName, additionalLabelValue, | ||||
| 	) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	err = w.WriteByte(' ') | ||||
| 	written++ | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	n, err = writeFloat(w, value) | ||||
| 	written += n | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	if metric.TimestampMs != nil { | ||||
| 		err = w.WriteByte(' ') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = writeInt(w, *metric.TimestampMs) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	err = w.WriteByte('\n') | ||||
| 	written++ | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	return written, nil | ||||
| } | ||||
|  | ||||
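A rendered sample in the classic text format looks like this (name and values are illustrative). Note that the optional trailing timestamp is the raw millisecond value written by writeInt, unlike the OpenMetrics writer above, which divides by 1000 and emits seconds:

    acme_http_requests_total{code="200",handler="/api"} 1027 1395066363000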
| // writeLabelPairs converts a slice of LabelPair proto messages plus the | ||||
| // explicitly given additional label pair into text formatted as required by the | ||||
| // text format and writes it to 'w'. An empty slice in combination with an empty | ||||
| // string 'additionalLabelName' results in nothing being written. Otherwise, the | ||||
| // label pairs are written, escaped as required by the text format, and enclosed | ||||
| // in '{...}'. The function returns the number of bytes written and any error | ||||
| // encountered. | ||||
| func writeLabelPairs( | ||||
| 	w enhancedWriter, | ||||
| 	in []*dto.LabelPair, | ||||
| 	additionalLabelName string, additionalLabelValue float64, | ||||
| ) (int, error) { | ||||
| 	if len(in) == 0 && additionalLabelName == "" { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	var ( | ||||
| 		written   int | ||||
| 		separator byte = '{' | ||||
| 	) | ||||
| 	for _, lp := range in { | ||||
| 		err := w.WriteByte(separator) | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err := w.WriteString(lp.GetName()) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = w.WriteString(`="`) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = writeEscapedString(w, lp.GetValue(), true) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		err = w.WriteByte('"') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		separator = ',' | ||||
| 	} | ||||
| 	if additionalLabelName != "" { | ||||
| 		err := w.WriteByte(separator) | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err := w.WriteString(additionalLabelName) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = w.WriteString(`="`) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		n, err = writeFloat(w, additionalLabelValue) | ||||
| 		written += n | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 		err = w.WriteByte('"') | ||||
| 		written++ | ||||
| 		if err != nil { | ||||
| 			return written, err | ||||
| 		} | ||||
| 	} | ||||
| 	err := w.WriteByte('}') | ||||
| 	written++ | ||||
| 	if err != nil { | ||||
| 		return written, err | ||||
| 	} | ||||
| 	return written, nil | ||||
| } | ||||
|  | ||||
| // writeEscapedString replaces '\' by '\\', new line character by '\n', and - if | ||||
| // includeDoubleQuote is true - '"' by '\"'. | ||||
| var ( | ||||
| 	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`) | ||||
| 	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) | ||||
| ) | ||||
|  | ||||
| func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { | ||||
| 	if includeDoubleQuote { | ||||
| 		return quotedEscaper.WriteString(w, v) | ||||
| 	} | ||||
| 	return escaper.WriteString(w, v) | ||||
| } | ||||
|  | ||||
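Concretely, the two replacers differ only in how they treat the double quote; backslashes and newlines are escaped in both HELP text and label values:

    input character     in label values     in HELP text
    backslash '\'       \\                  \\
    newline             \n                  \n
    double quote '"'    \"                  " (unchanged)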
| // writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes | ||||
| // a few common cases for increased efficiency. For non-hardcoded cases, it uses | ||||
| // strconv.AppendFloat to avoid allocations, similar to writeInt. | ||||
| func writeFloat(w enhancedWriter, f float64) (int, error) { | ||||
| 	switch { | ||||
| 	case f == 1: | ||||
| 		return 1, w.WriteByte('1') | ||||
| 	case f == 0: | ||||
| 		return 1, w.WriteByte('0') | ||||
| 	case f == -1: | ||||
| 		return w.WriteString("-1") | ||||
| 	case math.IsNaN(f): | ||||
| 		return w.WriteString("NaN") | ||||
| 	case math.IsInf(f, +1): | ||||
| 		return w.WriteString("+Inf") | ||||
| 	case math.IsInf(f, -1): | ||||
| 		return w.WriteString("-Inf") | ||||
| 	default: | ||||
| 		bp := numBufPool.Get().(*[]byte) | ||||
| 		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) | ||||
| 		written, err := w.Write(*bp) | ||||
| 		numBufPool.Put(bp) | ||||
| 		return written, err | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // writeInt is equivalent to fmt.Fprint with an int64 argument but uses | ||||
| // strconv.AppendInt with a byte slice taken from a sync.Pool to avoid | ||||
| // allocations. | ||||
| func writeInt(w enhancedWriter, i int64) (int, error) { | ||||
| 	bp := numBufPool.Get().(*[]byte) | ||||
| 	*bp = strconv.AppendInt((*bp)[:0], i, 10) | ||||
| 	written, err := w.Write(*bp) | ||||
| 	numBufPool.Put(bp) | ||||
| 	return written, err | ||||
| } | ||||
							
								
								
									
775  vendor/github.com/prometheus/common/expfmt/text_parse.go  (generated, vendored)
							| @ -1,775 +0,0 @@ | ||||
| // Copyright 2014 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package expfmt | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	dto "github.com/prometheus/client_model/go" | ||||
|  | ||||
| 	"github.com/golang/protobuf/proto" | ||||
| 	"github.com/prometheus/common/model" | ||||
| ) | ||||
|  | ||||
| // A stateFn is a function that represents a state in a state machine. By | ||||
| // executing it, the state is progressed to the next state. The stateFn returns | ||||
| // another stateFn, which represents the new state. The end state is represented | ||||
| // by nil. | ||||
| type stateFn func() stateFn | ||||
|  | ||||
| // ParseError signals errors while parsing the simple and flat text-based | ||||
| // exchange format. | ||||
| type ParseError struct { | ||||
| 	Line int | ||||
| 	Msg  string | ||||
| } | ||||
|  | ||||
| // Error implements the error interface. | ||||
| func (e ParseError) Error() string { | ||||
| 	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) | ||||
| } | ||||
|  | ||||
| // TextParser is used to parse the simple and flat text-based exchange format. Its | ||||
| // zero value is ready to use. | ||||
| type TextParser struct { | ||||
| 	metricFamiliesByName map[string]*dto.MetricFamily | ||||
| 	buf                  *bufio.Reader // Where the parsed input is read through. | ||||
| 	err                  error         // Most recent error. | ||||
| 	lineCount            int           // Tracks the line count for error messages. | ||||
| 	currentByte          byte          // The most recent byte read. | ||||
| 	currentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes. | ||||
| 	currentMF            *dto.MetricFamily | ||||
| 	currentMetric        *dto.Metric | ||||
| 	currentLabelPair     *dto.LabelPair | ||||
|  | ||||
| 	// The remaining member variables are only used for summaries/histograms. | ||||
| 	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' | ||||
| 	// Summary specific. | ||||
| 	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature. | ||||
| 	currentQuantile float64 | ||||
| 	// Histogram specific. | ||||
| 	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature. | ||||
| 	currentBucket float64 | ||||
| 	// These tell us if the currently processed line ends in '_count' or | ||||
| 	// '_sum' respectively and belongs to a summary/histogram, representing the | ||||
| 	// sample count and sum of that summary/histogram. | ||||
| 	currentIsSummaryCount, currentIsSummarySum     bool | ||||
| 	currentIsHistogramCount, currentIsHistogramSum bool | ||||
| } | ||||
|  | ||||
| // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange | ||||
| // format and creates MetricFamily proto messages. It returns the MetricFamily | ||||
| // proto messages in a map where the metric names are the keys, along with any | ||||
| // error encountered. | ||||
| // | ||||
| // If the input contains duplicate metrics (i.e. lines with the same metric name | ||||
| // and exactly the same label set), the resulting MetricFamily will contain | ||||
| // duplicate Metric proto messages. The same is true for duplicate label | ||||
| // names. Checks for duplicates have to be performed separately, if required. | ||||
| // Also note that neither the metrics within each MetricFamily are sorted nor | ||||
| // the label pairs within each Metric. Sorting is not required for the most | ||||
| // frequent use of this method, which is sample ingestion in the Prometheus | ||||
| // server. However, for presentation purposes, you might want to sort the | ||||
| // metrics, and in some cases, you must sort the labels, e.g. for consumption by | ||||
| // the metric family injection hook of the Prometheus registry. | ||||
| // | ||||
| // Summaries and histograms are rather special beasts. You would probably not | ||||
| // use them in the simple text format anyway. This method can deal with | ||||
| // summaries and histograms if they are presented in exactly the way the | ||||
| // text.Create function creates them. | ||||
| // | ||||
| // This method must not be called concurrently. If you want to parse different | ||||
| // input concurrently, instantiate a separate TextParser for each goroutine. | ||||
| func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { | ||||
| 	p.reset(in) | ||||
| 	for nextState := p.startOfLine; nextState != nil; nextState = nextState() { | ||||
| 		// Magic happens here... | ||||
| 	} | ||||
| 	// Get rid of empty metric families. | ||||
| 	for k, mf := range p.metricFamiliesByName { | ||||
| 		if len(mf.GetMetric()) == 0 { | ||||
| 			delete(p.metricFamiliesByName, k) | ||||
| 		} | ||||
| 	} | ||||
| 	// If p.err is io.EOF now, we have run into a premature end of the input | ||||
| 	// stream. Turn this error into something nicer and more | ||||
| 	// meaningful. (io.EOF is often used as a signal for the legitimate end | ||||
| 	// of an input stream.) | ||||
| 	if p.err == io.EOF { | ||||
| 		p.parseError("unexpected end of input stream") | ||||
| 	} | ||||
| 	return p.metricFamiliesByName, p.err | ||||
| } | ||||
|  | ||||
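A minimal usage sketch (the input text is invented for illustration); the zero-value TextParser is ready to use, as noted above:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
http_requests_total{code="500"} 3
`
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	mf := families["http_requests_total"]
	fmt.Println(mf.GetType(), len(mf.GetMetric())) // COUNTER 2
}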
| func (p *TextParser) reset(in io.Reader) { | ||||
| 	p.metricFamiliesByName = map[string]*dto.MetricFamily{} | ||||
| 	if p.buf == nil { | ||||
| 		p.buf = bufio.NewReader(in) | ||||
| 	} else { | ||||
| 		p.buf.Reset(in) | ||||
| 	} | ||||
| 	p.err = nil | ||||
| 	p.lineCount = 0 | ||||
| 	if p.summaries == nil || len(p.summaries) > 0 { | ||||
| 		p.summaries = map[uint64]*dto.Metric{} | ||||
| 	} | ||||
| 	if p.histograms == nil || len(p.histograms) > 0 { | ||||
| 		p.histograms = map[uint64]*dto.Metric{} | ||||
| 	} | ||||
| 	p.currentQuantile = math.NaN() | ||||
| 	p.currentBucket = math.NaN() | ||||
| } | ||||
|  | ||||
| // startOfLine represents the state where the next byte read from p.buf is the | ||||
| // start of a line (or whitespace leading up to it). | ||||
| func (p *TextParser) startOfLine() stateFn { | ||||
| 	p.lineCount++ | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		// End of input reached. This is the only case where | ||||
| 		// that is not an error but a signal that we are done. | ||||
| 		p.err = nil | ||||
| 		return nil | ||||
| 	} | ||||
| 	switch p.currentByte { | ||||
| 	case '#': | ||||
| 		return p.startComment | ||||
| 	case '\n': | ||||
| 		return p.startOfLine // Empty line, start the next one. | ||||
| 	} | ||||
| 	return p.readingMetricName | ||||
| } | ||||
|  | ||||
| // startComment represents the state where the next byte read from p.buf is the | ||||
| // start of a comment (or whitespace leading up to it). | ||||
| func (p *TextParser) startComment() stateFn { | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentByte == '\n' { | ||||
| 		return p.startOfLine | ||||
| 	} | ||||
| 	if p.readTokenUntilWhitespace(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	// If we have hit the end of line already, there is nothing left | ||||
| 	// to do. This is not considered a syntax error. | ||||
| 	if p.currentByte == '\n' { | ||||
| 		return p.startOfLine | ||||
| 	} | ||||
| 	keyword := p.currentToken.String() | ||||
| 	if keyword != "HELP" && keyword != "TYPE" { | ||||
| 		// Generic comment, ignore by fast forwarding to end of line. | ||||
| 		for p.currentByte != '\n' { | ||||
| 			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { | ||||
| 				return nil // Unexpected end of input. | ||||
| 			} | ||||
| 		} | ||||
| 		return p.startOfLine | ||||
| 	} | ||||
| 	// There is something. Next has to be a metric name. | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.readTokenAsMetricName(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentByte == '\n' { | ||||
| 		// At the end of the line already. | ||||
| 		// Again, this is not considered a syntax error. | ||||
| 		return p.startOfLine | ||||
| 	} | ||||
| 	if !isBlankOrTab(p.currentByte) { | ||||
| 		p.parseError("invalid metric name in comment") | ||||
| 		return nil | ||||
| 	} | ||||
| 	p.setOrCreateCurrentMF() | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentByte == '\n' { | ||||
| 		// At the end of the line already. | ||||
| 		// Again, this is not considered a syntax error. | ||||
| 		return p.startOfLine | ||||
| 	} | ||||
| 	switch keyword { | ||||
| 	case "HELP": | ||||
| 		return p.readingHelp | ||||
| 	case "TYPE": | ||||
| 		return p.readingType | ||||
| 	} | ||||
| 	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) | ||||
| } | ||||
|  | ||||
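Concretely (metric name invented), only HELP and TYPE comments carry information; any other text after '#' is skipped:

    # HELP acme_http_requests_total Total number of HTTP requests.   (stored in MetricFamily.Help)
    # TYPE acme_http_requests_total counter                          (stored in MetricFamily.Type)
    # an ordinary comment for humans                                 (ignored)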
| // readingMetricName represents the state where the last byte read (now in | ||||
| // p.currentByte) is the first byte of a metric name. | ||||
| func (p *TextParser) readingMetricName() stateFn { | ||||
| 	if p.readTokenAsMetricName(); p.err != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if p.currentToken.Len() == 0 { | ||||
| 		p.parseError("invalid metric name") | ||||
| 		return nil | ||||
| 	} | ||||
| 	p.setOrCreateCurrentMF() | ||||
| 	// Now is the time to fix the type if it hasn't happened yet. | ||||
| 	if p.currentMF.Type == nil { | ||||
| 		p.currentMF.Type = dto.MetricType_UNTYPED.Enum() | ||||
| 	} | ||||
| 	p.currentMetric = &dto.Metric{} | ||||
| 	// Do not append the newly created currentMetric to | ||||
| 	// currentMF.Metric right now. First wait to see whether this is a | ||||
| 	// summary and the metric already exists, which we can only know after | ||||
| 	// having read all the labels. | ||||
| 	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	return p.readingLabels | ||||
| } | ||||
|  | ||||
| // readingLabels represents the state where the last byte read (now in | ||||
| // p.currentByte) is either the first byte of the label set (i.e. a '{'), or the | ||||
| // first byte of the value (otherwise). | ||||
| func (p *TextParser) readingLabels() stateFn { | ||||
| 	// Summaries/histograms are special. We have to reset the | ||||
| 	// currentLabels map, currentQuantile and currentBucket before starting to | ||||
| 	// read labels. | ||||
| 	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { | ||||
| 		p.currentLabels = map[string]string{} | ||||
| 		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() | ||||
| 		p.currentQuantile = math.NaN() | ||||
| 		p.currentBucket = math.NaN() | ||||
| 	} | ||||
| 	if p.currentByte != '{' { | ||||
| 		return p.readingValue | ||||
| 	} | ||||
| 	return p.startLabelName | ||||
| } | ||||
|  | ||||
| // startLabelName represents the state where the next byte read from p.buf is | ||||
| // the start of a label name (or whitespace leading up to it). | ||||
| func (p *TextParser) startLabelName() stateFn { | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentByte == '}' { | ||||
| 		if p.skipBlankTab(); p.err != nil { | ||||
| 			return nil // Unexpected end of input. | ||||
| 		} | ||||
| 		return p.readingValue | ||||
| 	} | ||||
| 	if p.readTokenAsLabelName(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentToken.Len() == 0 { | ||||
| 		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} | ||||
| 	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { | ||||
| 		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) | ||||
| 		return nil | ||||
| 	} | ||||
| 	// Special summary/histogram treatment. Don't add 'quantile' and 'le' | ||||
| 	// labels to 'real' labels. | ||||
| 	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && | ||||
| 		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { | ||||
| 		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) | ||||
| 	} | ||||
| 	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentByte != '=' { | ||||
| 		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) | ||||
| 		return nil | ||||
| 	} | ||||
| 	// Check for duplicate label names. | ||||
| 	labels := make(map[string]struct{}) | ||||
| 	for _, l := range p.currentMetric.Label { | ||||
| 		lName := l.GetName() | ||||
| 		if _, exists := labels[lName]; !exists { | ||||
| 			labels[lName] = struct{}{} | ||||
| 		} else { | ||||
| 			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) | ||||
| 			return nil | ||||
| 		} | ||||
| 	} | ||||
| 	return p.startLabelValue | ||||
| } | ||||
|  | ||||
| // startLabelValue represents the state where the next byte read from p.buf is | ||||
| // the start of a (quoted) label value (or whitespace leading up to it). | ||||
| func (p *TextParser) startLabelValue() stateFn { | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentByte != '"' { | ||||
| 		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) | ||||
| 		return nil | ||||
| 	} | ||||
| 	if p.readTokenAsLabelValue(); p.err != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if !model.LabelValue(p.currentToken.String()).IsValid() { | ||||
| 		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	p.currentLabelPair.Value = proto.String(p.currentToken.String()) | ||||
| 	// Special treatment of summaries: | ||||
| 	// - Quantile labels are special, will result in dto.Quantile later. | ||||
| 	// - Other labels have to be added to currentLabels for signature calculation. | ||||
| 	if p.currentMF.GetType() == dto.MetricType_SUMMARY { | ||||
| 		if p.currentLabelPair.GetName() == model.QuantileLabel { | ||||
| 			if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { | ||||
| 				// Create a more helpful error message. | ||||
| 				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) | ||||
| 				return nil | ||||
| 			} | ||||
| 		} else { | ||||
| 			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() | ||||
| 		} | ||||
| 	} | ||||
| 	// Similar special treatment of histograms. | ||||
| 	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { | ||||
| 		if p.currentLabelPair.GetName() == model.BucketLabel { | ||||
| 			if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { | ||||
| 				// Create a more helpful error message. | ||||
| 				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) | ||||
| 				return nil | ||||
| 			} | ||||
| 		} else { | ||||
| 			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() | ||||
| 		} | ||||
| 	} | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	switch p.currentByte { | ||||
| 	case ',': | ||||
| 		return p.startLabelName | ||||
|  | ||||
| 	case '}': | ||||
| 		if p.skipBlankTab(); p.err != nil { | ||||
| 			return nil // Unexpected end of input. | ||||
| 		} | ||||
| 		return p.readingValue | ||||
| 	default: | ||||
| 		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // readingValue represents the state where the last byte read (now in | ||||
| // p.currentByte) is the first byte of the sample value (i.e. a float). | ||||
| func (p *TextParser) readingValue() stateFn { | ||||
| 	// When we are here, we have read all the labels, so for the | ||||
| 	// special case of a summary/histogram, we can finally find out | ||||
| 	// if the metric already exists. | ||||
| 	if p.currentMF.GetType() == dto.MetricType_SUMMARY { | ||||
| 		signature := model.LabelsToSignature(p.currentLabels) | ||||
| 		if summary := p.summaries[signature]; summary != nil { | ||||
| 			p.currentMetric = summary | ||||
| 		} else { | ||||
| 			p.summaries[signature] = p.currentMetric | ||||
| 			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) | ||||
| 		} | ||||
| 	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { | ||||
| 		signature := model.LabelsToSignature(p.currentLabels) | ||||
| 		if histogram := p.histograms[signature]; histogram != nil { | ||||
| 			p.currentMetric = histogram | ||||
| 		} else { | ||||
| 			p.histograms[signature] = p.currentMetric | ||||
| 			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) | ||||
| 		} | ||||
| 	} else { | ||||
| 		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) | ||||
| 	} | ||||
| 	if p.readTokenUntilWhitespace(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	value, err := parseFloat(p.currentToken.String()) | ||||
| 	if err != nil { | ||||
| 		// Create a more helpful error message. | ||||
| 		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	switch p.currentMF.GetType() { | ||||
| 	case dto.MetricType_COUNTER: | ||||
| 		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} | ||||
| 	case dto.MetricType_GAUGE: | ||||
| 		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} | ||||
| 	case dto.MetricType_UNTYPED: | ||||
| 		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} | ||||
| 	case dto.MetricType_SUMMARY: | ||||
| 		// *sigh* | ||||
| 		if p.currentMetric.Summary == nil { | ||||
| 			p.currentMetric.Summary = &dto.Summary{} | ||||
| 		} | ||||
| 		switch { | ||||
| 		case p.currentIsSummaryCount: | ||||
| 			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) | ||||
| 		case p.currentIsSummarySum: | ||||
| 			p.currentMetric.Summary.SampleSum = proto.Float64(value) | ||||
| 		case !math.IsNaN(p.currentQuantile): | ||||
| 			p.currentMetric.Summary.Quantile = append( | ||||
| 				p.currentMetric.Summary.Quantile, | ||||
| 				&dto.Quantile{ | ||||
| 					Quantile: proto.Float64(p.currentQuantile), | ||||
| 					Value:    proto.Float64(value), | ||||
| 				}, | ||||
| 			) | ||||
| 		} | ||||
| 	case dto.MetricType_HISTOGRAM: | ||||
| 		// *sigh* | ||||
| 		if p.currentMetric.Histogram == nil { | ||||
| 			p.currentMetric.Histogram = &dto.Histogram{} | ||||
| 		} | ||||
| 		switch { | ||||
| 		case p.currentIsHistogramCount: | ||||
| 			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) | ||||
| 		case p.currentIsHistogramSum: | ||||
| 			p.currentMetric.Histogram.SampleSum = proto.Float64(value) | ||||
| 		case !math.IsNaN(p.currentBucket): | ||||
| 			p.currentMetric.Histogram.Bucket = append( | ||||
| 				p.currentMetric.Histogram.Bucket, | ||||
| 				&dto.Bucket{ | ||||
| 					UpperBound:      proto.Float64(p.currentBucket), | ||||
| 					CumulativeCount: proto.Uint64(uint64(value)), | ||||
| 				}, | ||||
| 			) | ||||
| 		} | ||||
| 	default: | ||||
| 		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) | ||||
| 	} | ||||
| 	if p.currentByte == '\n' { | ||||
| 		return p.startOfLine | ||||
| 	} | ||||
| 	return p.startTimestamp | ||||
| } | ||||
|  | ||||
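As an illustration (metric name invented), the following histogram lines all end up in the same dto.Metric, because they share the same label signature once the 'le' label is excluded:

    acme_request_seconds_bucket{le="0.5"} 129   -> appends Bucket{UpperBound: 0.5, CumulativeCount: 129}
    acme_request_seconds_sum 53.2               -> sets Histogram.SampleSum = 53.2
    acme_request_seconds_count 144              -> sets Histogram.SampleCount = 144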
| // startTimestamp represents the state where the next byte read from p.buf is | ||||
| // the start of the timestamp (or whitespace leading up to it). | ||||
| func (p *TextParser) startTimestamp() stateFn { | ||||
| 	if p.skipBlankTab(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.readTokenUntilWhitespace(); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) | ||||
| 	if err != nil { | ||||
| 		// Create a more helpful error message. | ||||
| 		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	p.currentMetric.TimestampMs = proto.Int64(timestamp) | ||||
| 	if p.readTokenUntilNewline(false); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	if p.currentToken.Len() > 0 { | ||||
| 		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return p.startOfLine | ||||
| } | ||||
|  | ||||
| // readingHelp represents the state where the last byte read (now in | ||||
| // p.currentByte) is the first byte of the docstring after 'HELP'. | ||||
| func (p *TextParser) readingHelp() stateFn { | ||||
| 	if p.currentMF.Help != nil { | ||||
| 		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	// Rest of line is the docstring. | ||||
| 	if p.readTokenUntilNewline(true); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	p.currentMF.Help = proto.String(p.currentToken.String()) | ||||
| 	return p.startOfLine | ||||
| } | ||||
|  | ||||
| // readingType represents the state where the last byte read (now in | ||||
| // p.currentByte) is the first byte of the type hint after 'TYPE'. | ||||
| func (p *TextParser) readingType() stateFn { | ||||
| 	if p.currentMF.Type != nil { | ||||
| 		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	// Rest of line is the type. | ||||
| 	if p.readTokenUntilNewline(false); p.err != nil { | ||||
| 		return nil // Unexpected end of input. | ||||
| 	} | ||||
| 	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] | ||||
| 	if !ok { | ||||
| 		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) | ||||
| 		return nil | ||||
| 	} | ||||
| 	p.currentMF.Type = dto.MetricType(metricType).Enum() | ||||
| 	return p.startOfLine | ||||
| } | ||||
|  | ||||
| // parseError sets p.err to a ParseError at the current line with the given | ||||
| // message. | ||||
| func (p *TextParser) parseError(msg string) { | ||||
| 	p.err = ParseError{ | ||||
| 		Line: p.lineCount, | ||||
| 		Msg:  msg, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte | ||||
| // that is neither ' ' nor '\t'. That byte is left in p.currentByte. | ||||
| func (p *TextParser) skipBlankTab() { | ||||
| 	for { | ||||
| 		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do | ||||
| // anything if p.currentByte is neither ' ' nor '\t'. | ||||
| func (p *TextParser) skipBlankTabIfCurrentBlankTab() { | ||||
| 	if isBlankOrTab(p.currentByte) { | ||||
| 		p.skipBlankTab() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // readTokenUntilWhitespace copies bytes from p.buf into p.currentToken.  The | ||||
| // first byte considered is the byte already read (now in p.currentByte).  The | ||||
| // first whitespace byte encountered is still copied into p.currentByte, but not | ||||
| // into p.currentToken. | ||||
| func (p *TextParser) readTokenUntilWhitespace() { | ||||
| 	p.currentToken.Reset() | ||||
| 	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { | ||||
| 		p.currentToken.WriteByte(p.currentByte) | ||||
| 		p.currentByte, p.err = p.buf.ReadByte() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // readTokenUntilNewline copies bytes from p.buf into p.currentToken.  The first | ||||
| // byte considered is the byte already read (now in p.currentByte).  The first | ||||
| // newline byte encountered is still copied into p.currentByte, but not into | ||||
| // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are | ||||
| // recognized: '\\' translates into '\', and '\n' into a line-feed character. | ||||
| // All other escape sequences are invalid and cause an error. | ||||
| func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { | ||||
| 	p.currentToken.Reset() | ||||
| 	escaped := false | ||||
| 	for p.err == nil { | ||||
| 		if recognizeEscapeSequence && escaped { | ||||
| 			switch p.currentByte { | ||||
| 			case '\\': | ||||
| 				p.currentToken.WriteByte(p.currentByte) | ||||
| 			case 'n': | ||||
| 				p.currentToken.WriteByte('\n') | ||||
| 			default: | ||||
| 				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) | ||||
| 				return | ||||
| 			} | ||||
| 			escaped = false | ||||
| 		} else { | ||||
| 			switch p.currentByte { | ||||
| 			case '\n': | ||||
| 				return | ||||
| 			case '\\': | ||||
| 				escaped = true | ||||
| 			default: | ||||
| 				p.currentToken.WriteByte(p.currentByte) | ||||
| 			} | ||||
| 		} | ||||
| 		p.currentByte, p.err = p.buf.ReadByte() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // readTokenAsMetricName copies a metric name from p.buf into p.currentToken. | ||||
| // The first byte considered is the byte already read (now in p.currentByte). | ||||
| // The first byte not part of a metric name is still copied into p.currentByte, | ||||
| // but not into p.currentToken. | ||||
| func (p *TextParser) readTokenAsMetricName() { | ||||
| 	p.currentToken.Reset() | ||||
| 	if !isValidMetricNameStart(p.currentByte) { | ||||
| 		return | ||||
| 	} | ||||
| 	for { | ||||
| 		p.currentToken.WriteByte(p.currentByte) | ||||
| 		p.currentByte, p.err = p.buf.ReadByte() | ||||
| 		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // readTokenAsLabelName copies a label name from p.buf into p.currentToken. | ||||
| // The first byte considered is the byte already read (now in p.currentByte). | ||||
| // The first byte not part of a label name is still copied into p.currentByte, | ||||
| // but not into p.currentToken. | ||||
| func (p *TextParser) readTokenAsLabelName() { | ||||
| 	p.currentToken.Reset() | ||||
| 	if !isValidLabelNameStart(p.currentByte) { | ||||
| 		return | ||||
| 	} | ||||
| 	for { | ||||
| 		p.currentToken.WriteByte(p.currentByte) | ||||
| 		p.currentByte, p.err = p.buf.ReadByte() | ||||
| 		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // readTokenAsLabelValue copies a label value from p.buf into p.currentToken. | ||||
| // In contrast to the other 'readTokenAs...' functions, which start with the | ||||
| // last read byte in p.currentByte, this method ignores p.currentByte and starts | ||||
| // with reading a new byte from p.buf. The first byte not part of a label value | ||||
| // is still copied into p.currentByte, but not into p.currentToken. | ||||
| func (p *TextParser) readTokenAsLabelValue() { | ||||
| 	p.currentToken.Reset() | ||||
| 	escaped := false | ||||
| 	for { | ||||
| 		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 		if escaped { | ||||
| 			switch p.currentByte { | ||||
| 			case '"', '\\': | ||||
| 				p.currentToken.WriteByte(p.currentByte) | ||||
| 			case 'n': | ||||
| 				p.currentToken.WriteByte('\n') | ||||
| 			default: | ||||
| 				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) | ||||
| 				return | ||||
| 			} | ||||
| 			escaped = false | ||||
| 			continue | ||||
| 		} | ||||
| 		switch p.currentByte { | ||||
| 		case '"': | ||||
| 			return | ||||
| 		case '\n': | ||||
| 			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) | ||||
| 			return | ||||
| 		case '\\': | ||||
| 			escaped = true | ||||
| 		default: | ||||
| 			p.currentToken.WriteByte(p.currentByte) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (p *TextParser) setOrCreateCurrentMF() { | ||||
| 	p.currentIsSummaryCount = false | ||||
| 	p.currentIsSummarySum = false | ||||
| 	p.currentIsHistogramCount = false | ||||
| 	p.currentIsHistogramSum = false | ||||
| 	name := p.currentToken.String() | ||||
| 	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	// Check whether this is a _sum or _count for a summary/histogram. | ||||
| 	summaryName := summaryMetricName(name) | ||||
| 	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { | ||||
| 		if p.currentMF.GetType() == dto.MetricType_SUMMARY { | ||||
| 			if isCount(name) { | ||||
| 				p.currentIsSummaryCount = true | ||||
| 			} | ||||
| 			if isSum(name) { | ||||
| 				p.currentIsSummarySum = true | ||||
| 			} | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	histogramName := histogramMetricName(name) | ||||
| 	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { | ||||
| 		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { | ||||
| 			if isCount(name) { | ||||
| 				p.currentIsHistogramCount = true | ||||
| 			} | ||||
| 			if isSum(name) { | ||||
| 				p.currentIsHistogramSum = true | ||||
| 			} | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	p.currentMF = &dto.MetricFamily{Name: proto.String(name)} | ||||
| 	p.metricFamiliesByName[name] = p.currentMF | ||||
| } | ||||
|  | ||||
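For example (names invented), assuming a family "acme_rpc_seconds" of type SUMMARY has already been registered:

    acme_rpc_seconds_count  -> routed to family "acme_rpc_seconds", currentIsSummaryCount = true
    acme_rpc_seconds_sum    -> routed to family "acme_rpc_seconds", currentIsSummarySum = true
    acme_rpc_seconds_other  -> no matching suffix or family; a new family "acme_rpc_seconds_other" is created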
| func isValidLabelNameStart(b byte) bool { | ||||
| 	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' | ||||
| } | ||||
|  | ||||
| func isValidLabelNameContinuation(b byte) bool { | ||||
| 	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') | ||||
| } | ||||
|  | ||||
| func isValidMetricNameStart(b byte) bool { | ||||
| 	return isValidLabelNameStart(b) || b == ':' | ||||
| } | ||||
|  | ||||
| func isValidMetricNameContinuation(b byte) bool { | ||||
| 	return isValidLabelNameContinuation(b) || b == ':' | ||||
| } | ||||
|  | ||||
| func isBlankOrTab(b byte) bool { | ||||
| 	return b == ' ' || b == '\t' | ||||
| } | ||||
|  | ||||
| func isCount(name string) bool { | ||||
| 	return len(name) > 6 && name[len(name)-6:] == "_count" | ||||
| } | ||||
|  | ||||
| func isSum(name string) bool { | ||||
| 	return len(name) > 4 && name[len(name)-4:] == "_sum" | ||||
| } | ||||
|  | ||||
| func isBucket(name string) bool { | ||||
| 	return len(name) > 7 && name[len(name)-7:] == "_bucket" | ||||
| } | ||||
|  | ||||
| func summaryMetricName(name string) string { | ||||
| 	switch { | ||||
| 	case isCount(name): | ||||
| 		return name[:len(name)-6] | ||||
| 	case isSum(name): | ||||
| 		return name[:len(name)-4] | ||||
| 	default: | ||||
| 		return name | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func histogramMetricName(name string) string { | ||||
| 	switch { | ||||
| 	case isCount(name): | ||||
| 		return name[:len(name)-6] | ||||
| 	case isSum(name): | ||||
| 		return name[:len(name)-4] | ||||
| 	case isBucket(name): | ||||
| 		return name[:len(name)-7] | ||||
| 	default: | ||||
| 		return name | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func parseFloat(s string) (float64, error) { | ||||
| 	if strings.ContainsAny(s, "pP_") { | ||||
| 		return 0, fmt.Errorf("unsupported character in float") | ||||
| 	} | ||||
| 	return strconv.ParseFloat(s, 64) | ||||
| } | ||||
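Illustrative behaviour of this wrapper: it rejects forms that newer versions of strconv.ParseFloat would accept, such as hexadecimal floats and digit-separating underscores, because they are not valid in the exposition format:

    parseFloat("3.14")    // 3.14, nil
    parseFloat("1e3")     // 1000, nil
    parseFloat("+Inf")    // +Inf, nil
    parseFloat("0x1p-2")  // 0, error ("unsupported character in float")
    parseFloat("1_000")   // 0, error ("unsupported character in float")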
							
								
								
									
67  vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt  (generated, vendored)
							| @ -1,67 +0,0 @@ | ||||
| PACKAGE | ||||
|  | ||||
| package goautoneg | ||||
| import "bitbucket.org/ww/goautoneg" | ||||
|  | ||||
| HTTP Content-Type Autonegotiation. | ||||
|  | ||||
| The functions in this package implement the behaviour specified in | ||||
| http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html | ||||
|  | ||||
| Copyright (c) 2011, Open Knowledge Foundation Ltd. | ||||
| All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|     Redistributions of source code must retain the above copyright | ||||
|     notice, this list of conditions and the following disclaimer. | ||||
|  | ||||
|     Redistributions in binary form must reproduce the above copyright | ||||
|     notice, this list of conditions and the following disclaimer in | ||||
|     the documentation and/or other materials provided with the | ||||
|     distribution. | ||||
|  | ||||
|     Neither the name of the Open Knowledge Foundation Ltd. nor the | ||||
|     names of its contributors may be used to endorse or promote | ||||
|     products derived from this software without specific prior written | ||||
|     permission. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
|  | ||||
| FUNCTIONS | ||||
|  | ||||
| func Negotiate(header string, alternatives []string) (content_type string) | ||||
| Negotiate the most appropriate content_type given the accept header | ||||
| and a list of alternatives. | ||||
|  | ||||
| func ParseAccept(header string) (accept []Accept) | ||||
| Parse an Accept Header string returning a sorted list | ||||
| of clauses | ||||
|  | ||||
|  | ||||
| TYPES | ||||
|  | ||||
| type Accept struct { | ||||
|     Type, SubType string | ||||
|     Q             float32 | ||||
|     Params        map[string]string | ||||
| } | ||||
| Structure to represent a clause in an HTTP Accept Header | ||||
|  | ||||
|  | ||||
| SUBDIRECTORIES | ||||
|  | ||||
| 	.hg | ||||
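
For context on what is being dropped here, a short usage sketch of the API this README documents, assuming the original bitbucket.org/ww/goautoneg import path (or a vendored copy of it) is still resolvable; the content types are illustrative:

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg" // original upstream path from the README; may need a module replace today
)

func main() {
	header := "text/html;q=0.9, application/json;q=1.0, */*;q=0.1"

	// Negotiate picks the best match from the server's alternatives.
	best := goautoneg.Negotiate(header, []string{"application/json", "text/plain"})
	fmt.Println(best) // application/json

	// ParseAccept exposes the sorted clauses directly.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
	}
}
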
							
								
								
									
162 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go (generated, vendored)
| @ -1,162 +0,0 @@ | ||||
| /* | ||||
| Copyright (c) 2011, Open Knowledge Foundation Ltd. | ||||
| All rights reserved. | ||||
|  | ||||
| HTTP Content-Type Autonegotiation. | ||||
|  | ||||
| The functions in this package implement the behaviour specified in | ||||
| http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|     Redistributions of source code must retain the above copyright | ||||
|     notice, this list of conditions and the following disclaimer. | ||||
|  | ||||
|     Redistributions in binary form must reproduce the above copyright | ||||
|     notice, this list of conditions and the following disclaimer in | ||||
|     the documentation and/or other materials provided with the | ||||
|     distribution. | ||||
|  | ||||
|     Neither the name of the Open Knowledge Foundation Ltd. nor the | ||||
|     names of its contributors may be used to endorse or promote | ||||
|     products derived from this software without specific prior written | ||||
|     permission. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
|  | ||||
| */ | ||||
| package goautoneg | ||||
|  | ||||
| import ( | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // Structure to represent a clause in an HTTP Accept Header | ||||
| type Accept struct { | ||||
| 	Type, SubType string | ||||
| 	Q             float64 | ||||
| 	Params        map[string]string | ||||
| } | ||||
|  | ||||
| // For internal use, so that we can use the sort interface | ||||
| type accept_slice []Accept | ||||
|  | ||||
| func (accept accept_slice) Len() int { | ||||
| 	slice := []Accept(accept) | ||||
| 	return len(slice) | ||||
| } | ||||
|  | ||||
| func (accept accept_slice) Less(i, j int) bool { | ||||
| 	slice := []Accept(accept) | ||||
| 	ai, aj := slice[i], slice[j] | ||||
| 	if ai.Q > aj.Q { | ||||
| 		return true | ||||
| 	} | ||||
| 	if ai.Type != "*" && aj.Type == "*" { | ||||
| 		return true | ||||
| 	} | ||||
| 	if ai.SubType != "*" && aj.SubType == "*" { | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (accept accept_slice) Swap(i, j int) { | ||||
| 	slice := []Accept(accept) | ||||
| 	slice[i], slice[j] = slice[j], slice[i] | ||||
| } | ||||
|  | ||||
| // Parse an Accept Header string returning a sorted list | ||||
| // of clauses | ||||
| func ParseAccept(header string) (accept []Accept) { | ||||
| 	parts := strings.Split(header, ",") | ||||
| 	accept = make([]Accept, 0, len(parts)) | ||||
| 	for _, part := range parts { | ||||
| 		part := strings.Trim(part, " ") | ||||
|  | ||||
| 		a := Accept{} | ||||
| 		a.Params = make(map[string]string) | ||||
| 		a.Q = 1.0 | ||||
|  | ||||
| 		mrp := strings.Split(part, ";") | ||||
|  | ||||
| 		media_range := mrp[0] | ||||
| 		sp := strings.Split(media_range, "/") | ||||
| 		a.Type = strings.Trim(sp[0], " ") | ||||
|  | ||||
| 		switch { | ||||
| 		case len(sp) == 1 && a.Type == "*": | ||||
| 			a.SubType = "*" | ||||
| 		case len(sp) == 2: | ||||
| 			a.SubType = strings.Trim(sp[1], " ") | ||||
| 		default: | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		if len(mrp) == 1 { | ||||
| 			accept = append(accept, a) | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		for _, param := range mrp[1:] { | ||||
| 			sp := strings.SplitN(param, "=", 2) | ||||
| 			if len(sp) != 2 { | ||||
| 				continue | ||||
| 			} | ||||
| 			token := strings.Trim(sp[0], " ") | ||||
| 			if token == "q" { | ||||
| 				a.Q, _ = strconv.ParseFloat(sp[1], 32) | ||||
| 			} else { | ||||
| 				a.Params[token] = strings.Trim(sp[1], " ") | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		accept = append(accept, a) | ||||
| 	} | ||||
|  | ||||
| 	slice := accept_slice(accept) | ||||
| 	sort.Sort(slice) | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Negotiate the most appropriate content_type given the accept header | ||||
| // and a list of alternatives. | ||||
| func Negotiate(header string, alternatives []string) (content_type string) { | ||||
| 	asp := make([][]string, 0, len(alternatives)) | ||||
| 	for _, ctype := range alternatives { | ||||
| 		asp = append(asp, strings.SplitN(ctype, "/", 2)) | ||||
| 	} | ||||
| 	for _, clause := range ParseAccept(header) { | ||||
| 		for i, ctsp := range asp { | ||||
| 			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { | ||||
| 				content_type = alternatives[i] | ||||
| 				return | ||||
| 			} | ||||
| 			if clause.Type == ctsp[0] && clause.SubType == "*" { | ||||
| 				content_type = alternatives[i] | ||||
| 				return | ||||
| 			} | ||||
| 			if clause.Type == "*" && clause.SubType == "*" { | ||||
| 				content_type = alternatives[i] | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
							
								
								
									
136 vendor/github.com/prometheus/common/model/alert.go (generated, vendored)
| @ -1,136 +0,0 @@ | ||||
| // Copyright 2013 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package model | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| type AlertStatus string | ||||
|  | ||||
| const ( | ||||
| 	AlertFiring   AlertStatus = "firing" | ||||
| 	AlertResolved AlertStatus = "resolved" | ||||
| ) | ||||
|  | ||||
| // Alert is a generic representation of an alert in the Prometheus eco-system. | ||||
| type Alert struct { | ||||
| 	// Label value pairs for purpose of aggregation, matching, and disposition | ||||
| 	// dispatching. This must minimally include an "alertname" label. | ||||
| 	Labels LabelSet `json:"labels"` | ||||
|  | ||||
| 	// Extra key/value information which does not define alert identity. | ||||
| 	Annotations LabelSet `json:"annotations"` | ||||
|  | ||||
| 	// The known time range for this alert. Both ends are optional. | ||||
| 	StartsAt     time.Time `json:"startsAt,omitempty"` | ||||
| 	EndsAt       time.Time `json:"endsAt,omitempty"` | ||||
| 	GeneratorURL string    `json:"generatorURL"` | ||||
| } | ||||
|  | ||||
| // Name returns the name of the alert. It is equivalent to the "alertname" label. | ||||
| func (a *Alert) Name() string { | ||||
| 	return string(a.Labels[AlertNameLabel]) | ||||
| } | ||||
|  | ||||
| // Fingerprint returns a unique hash for the alert. It is equivalent to | ||||
| // the fingerprint of the alert's label set. | ||||
| func (a *Alert) Fingerprint() Fingerprint { | ||||
| 	return a.Labels.Fingerprint() | ||||
| } | ||||
|  | ||||
| func (a *Alert) String() string { | ||||
| 	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) | ||||
| 	if a.Resolved() { | ||||
| 		return s + "[resolved]" | ||||
| 	} | ||||
| 	return s + "[active]" | ||||
| } | ||||
|  | ||||
| // Resolved returns true iff the activity interval ended in the past. | ||||
| func (a *Alert) Resolved() bool { | ||||
| 	return a.ResolvedAt(time.Now()) | ||||
| } | ||||
|  | ||||
| // ResolvedAt returns true iff the activity interval ended before | ||||
| // the given timestamp. | ||||
| func (a *Alert) ResolvedAt(ts time.Time) bool { | ||||
| 	if a.EndsAt.IsZero() { | ||||
| 		return false | ||||
| 	} | ||||
| 	return !a.EndsAt.After(ts) | ||||
| } | ||||
|  | ||||
| // Status returns the status of the alert. | ||||
| func (a *Alert) Status() AlertStatus { | ||||
| 	if a.Resolved() { | ||||
| 		return AlertResolved | ||||
| 	} | ||||
| 	return AlertFiring | ||||
| } | ||||
|  | ||||
| // Validate returns an error if the alert data is inconsistent. | ||||
| func (a *Alert) Validate() error { | ||||
| 	if a.StartsAt.IsZero() { | ||||
| 		return fmt.Errorf("start time missing") | ||||
| 	} | ||||
| 	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { | ||||
| 		return fmt.Errorf("start time must be before end time") | ||||
| 	} | ||||
| 	if err := a.Labels.Validate(); err != nil { | ||||
| 		return fmt.Errorf("invalid label set: %s", err) | ||||
| 	} | ||||
| 	if len(a.Labels) == 0 { | ||||
| 		return fmt.Errorf("at least one label pair required") | ||||
| 	} | ||||
| 	if err := a.Annotations.Validate(); err != nil { | ||||
| 		return fmt.Errorf("invalid annotations: %s", err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Alerts is a list of alerts that can be sorted in chronological order. | ||||
| type Alerts []*Alert | ||||
|  | ||||
| func (as Alerts) Len() int      { return len(as) } | ||||
| func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } | ||||
|  | ||||
| func (as Alerts) Less(i, j int) bool { | ||||
| 	if as[i].StartsAt.Before(as[j].StartsAt) { | ||||
| 		return true | ||||
| 	} | ||||
| 	if as[i].EndsAt.Before(as[j].EndsAt) { | ||||
| 		return true | ||||
| 	} | ||||
| 	return as[i].Fingerprint() < as[j].Fingerprint() | ||||
| } | ||||
|  | ||||
| // HasFiring returns true iff one of the alerts is not resolved. | ||||
| func (as Alerts) HasFiring() bool { | ||||
| 	for _, a := range as { | ||||
| 		if !a.Resolved() { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // Status returns AlertFiring iff at least one of the alerts is firing. | ||||
| func (as Alerts) Status() AlertStatus { | ||||
| 	if as.HasFiring() { | ||||
| 		return AlertFiring | ||||
| 	} | ||||
| 	return AlertResolved | ||||
| } | ||||
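
A brief sketch of how the Alert type above is typically consumed through the public module path github.com/prometheus/common/model; the label and annotation values are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "HighErrorRate",
			"severity":           "page",
		},
		Annotations: model.LabelSet{"summary": "error rate above 5%"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
	}

	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	// No EndsAt set, so the alert is still considered firing.
	fmt.Println(a.Name(), a.Status(), a.Fingerprint())
}
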
							
								
								
									
105 vendor/github.com/prometheus/common/model/fingerprinting.go (generated, vendored)
| @ -1,105 +0,0 @@ | ||||
| // Copyright 2013 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package model | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
| // Fingerprint provides a hash-capable representation of a Metric. | ||||
| // For our purposes, FNV-1A 64-bit is used. | ||||
| type Fingerprint uint64 | ||||
|  | ||||
| // FingerprintFromString transforms a string representation into a Fingerprint. | ||||
| func FingerprintFromString(s string) (Fingerprint, error) { | ||||
| 	num, err := strconv.ParseUint(s, 16, 64) | ||||
| 	return Fingerprint(num), err | ||||
| } | ||||
|  | ||||
| // ParseFingerprint parses the input string into a fingerprint. | ||||
| func ParseFingerprint(s string) (Fingerprint, error) { | ||||
| 	num, err := strconv.ParseUint(s, 16, 64) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	return Fingerprint(num), nil | ||||
| } | ||||
|  | ||||
| func (f Fingerprint) String() string { | ||||
| 	return fmt.Sprintf("%016x", uint64(f)) | ||||
| } | ||||
|  | ||||
| // Fingerprints represents a collection of Fingerprint subject to a given | ||||
| // natural sorting scheme. It implements sort.Interface. | ||||
| type Fingerprints []Fingerprint | ||||
|  | ||||
| // Len implements sort.Interface. | ||||
| func (f Fingerprints) Len() int { | ||||
| 	return len(f) | ||||
| } | ||||
|  | ||||
| // Less implements sort.Interface. | ||||
| func (f Fingerprints) Less(i, j int) bool { | ||||
| 	return f[i] < f[j] | ||||
| } | ||||
|  | ||||
| // Swap implements sort.Interface. | ||||
| func (f Fingerprints) Swap(i, j int) { | ||||
| 	f[i], f[j] = f[j], f[i] | ||||
| } | ||||
|  | ||||
| // FingerprintSet is a set of Fingerprints. | ||||
| type FingerprintSet map[Fingerprint]struct{} | ||||
|  | ||||
| // Equal returns true if both sets contain the same elements (and not more). | ||||
| func (s FingerprintSet) Equal(o FingerprintSet) bool { | ||||
| 	if len(s) != len(o) { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	for k := range s { | ||||
| 		if _, ok := o[k]; !ok { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // Intersection returns the elements contained in both sets. | ||||
| func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { | ||||
| 	myLength, otherLength := len(s), len(o) | ||||
| 	if myLength == 0 || otherLength == 0 { | ||||
| 		return FingerprintSet{} | ||||
| 	} | ||||
|  | ||||
| 	subSet := s | ||||
| 	superSet := o | ||||
|  | ||||
| 	if otherLength < myLength { | ||||
| 		subSet = o | ||||
| 		superSet = s | ||||
| 	} | ||||
|  | ||||
| 	out := FingerprintSet{} | ||||
|  | ||||
| 	for k := range subSet { | ||||
| 		if _, ok := superSet[k]; ok { | ||||
| 			out[k] = struct{}{} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return out | ||||
| } | ||||
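
A short sketch of the fingerprint helpers above via github.com/prometheus/common/model; the numeric fingerprints are arbitrary:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	b := model.FingerprintSet{2: {}, 3: {}, 4: {}}

	// Intersection keeps only fingerprints present in both sets.
	common := a.Intersection(b)
	fmt.Println(len(common), common.Equal(model.FingerprintSet{2: {}, 3: {}})) // 2 true

	// Fingerprints render as fixed-width hex and parse back from that form.
	fp := model.Fingerprint(0xdeadbeef)
	parsed, _ := model.ParseFingerprint(fp.String())
	fmt.Println(fp.String(), parsed == fp) // 00000000deadbeef true
}
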
							
								
								
									
42 vendor/github.com/prometheus/common/model/fnv.go (generated, vendored)
| @ -1,42 +0,0 @@ | ||||
| // Copyright 2015 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package model | ||||
|  | ||||
| // Inline and byte-free variant of hash/fnv's fnv64a. | ||||
|  | ||||
| const ( | ||||
| 	offset64 = 14695981039346656037 | ||||
| 	prime64  = 1099511628211 | ||||
| ) | ||||
|  | ||||
| // hashNew initializes a new fnv64a hash value. | ||||
| func hashNew() uint64 { | ||||
| 	return offset64 | ||||
| } | ||||
|  | ||||
| // hashAdd adds a string to a fnv64a hash value, returning the updated hash. | ||||
| func hashAdd(h uint64, s string) uint64 { | ||||
| 	for i := 0; i < len(s); i++ { | ||||
| 		h ^= uint64(s[i]) | ||||
| 		h *= prime64 | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| // hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. | ||||
| func hashAddByte(h uint64, b byte) uint64 { | ||||
| 	h ^= uint64(b) | ||||
| 	h *= prime64 | ||||
| 	return h | ||||
| } | ||||
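
hashNew and hashAdd above are an allocation-free restatement of FNV-1a; the constants are the standard 64-bit offset basis and prime. A small sketch (re-declaring the loop locally, since the helpers are unexported) that checks the result against hash/fnv from the standard library:

package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd mirrors the vendored helper: fold a string into an FNV-1a state
// one byte at a time (xor, then multiply by the prime).
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	s := "__name__=http_requests_total"

	manual := hashAdd(offset64, s)

	std := fnv.New64a()
	std.Write([]byte(s))

	// Both paths compute the same FNV-1a 64-bit digest.
	fmt.Println(manual == std.Sum64())
}
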
							
								
								
									
210 vendor/github.com/prometheus/common/model/labels.go (generated, vendored)
| @ -1,210 +0,0 @@ | ||||
| // Copyright 2013 The Prometheus Authors | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package model | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// AlertNameLabel is the name of the label containing an alert's name. | ||||
| 	AlertNameLabel = "alertname" | ||||
|  | ||||
| 	// ExportedLabelPrefix is the prefix to prepend to the label names present in | ||||
| 	// exported metrics if a label of the same name is added by the server. | ||||
| 	ExportedLabelPrefix = "exported_" | ||||
|  | ||||
| 	// MetricNameLabel is the label name indicating the metric name of a | ||||
| 	// timeseries. | ||||
| 	MetricNameLabel = "__name__" | ||||
|  | ||||
| 	// SchemeLabel is the name of the label that holds the scheme on which to | ||||
| 	// scrape a target. | ||||
| 	SchemeLabel = "__scheme__" | ||||
|  | ||||
| 	// AddressLabel is the name of the label that holds the address of | ||||
| 	// a scrape target. | ||||
| 	AddressLabel = "__address__" | ||||
|  | ||||
| 	// MetricsPathLabel is the name of the label that holds the path on which to | ||||
| 	// scrape a target. | ||||
| 	MetricsPathLabel = "__metrics_path__" | ||||
|  | ||||
| 	// ReservedLabelPrefix is a prefix which is not legal in user-supplied | ||||
| 	// label names. | ||||
| 	ReservedLabelPrefix = "__" | ||||
|  | ||||
| 	// MetaLabelPrefix is a prefix for labels that provide meta information. | ||||
| 	// Labels with this prefix are used for intermediate label processing and | ||||
| 	// will not be attached to time series. | ||||
| 	MetaLabelPrefix = "__meta_" | ||||
|  | ||||
| 	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling. | ||||
| 	// Labels with this prefix are used for intermediate label processing and | ||||
| 	// will not be attached to time series. This is reserved for use in | ||||
| 	// Prometheus configuration files by users. | ||||
| 	TmpLabelPrefix = "__tmp_" | ||||
|  | ||||
| 	// ParamLabelPrefix is a prefix for labels that provide URL parameters | ||||
| 	// used to scrape a target. | ||||
| 	ParamLabelPrefix = "__param_" | ||||
|  | ||||
| 	// JobLabel is the label name indicating the job from which a timeseries | ||||
| 	// was scraped. | ||||
| 	JobLabel = "job" | ||||
|  | ||||
| 	// InstanceLabel is the label name used for the instance label. | ||||
| 	InstanceLabel = "instance" | ||||
|  | ||||
| 	// BucketLabel is used for the label that defines the upper bound of a | ||||
| 	// bucket of a histogram ("le" -> "less or equal"). | ||||
| 	BucketLabel = "le" | ||||
|  | ||||
| 	// QuantileLabel is used for the label that defines the quantile in a | ||||
| 	// summary. | ||||
| 	QuantileLabel = "quantile" | ||||
| ) | ||||
|  | ||||
| // LabelNameRE is a regular expression matching valid label names. Note that the | ||||
| // IsValid method of LabelName performs the same check but faster than a match | ||||
| // with this regular expression. | ||||
| var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") | ||||
|  | ||||
| // A LabelName is a key for a LabelSet or Metric.  It has a value associated | ||||
| // therewith. | ||||
| type LabelName string | ||||
|  | ||||
| // IsValid is true iff the label name matches the pattern of LabelNameRE. This | ||||
| // method, however, does not use LabelNameRE for the check but a much faster | ||||
| // hardcoded implementation. | ||||
| func (ln LabelName) IsValid() bool { | ||||
| 	if len(ln) == 0 { | ||||
| 		return false | ||||
| 	} | ||||
| 	for i, b := range ln { | ||||
| 		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // UnmarshalYAML implements the yaml.Unmarshaler interface. | ||||
| func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { | ||||
| 	var s string | ||||
| 	if err := unmarshal(&s); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if !LabelName(s).IsValid() { | ||||
| 		return fmt.Errorf("%q is not a valid label name", s) | ||||
| 	} | ||||
| 	*ln = LabelName(s) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // UnmarshalJSON implements the json.Unmarshaler interface. | ||||
| func (ln *LabelName) UnmarshalJSON(b []byte) error { | ||||
| 	var s string | ||||
| 	if err := json.Unmarshal(b, &s); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if !LabelName(s).IsValid() { | ||||
| 		return fmt.Errorf("%q is not a valid label name", s) | ||||
| 	} | ||||
| 	*ln = LabelName(s) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // LabelNames is a sortable LabelName slice. It implements sort.Interface. | ||||
| type LabelNames []LabelName | ||||
|  | ||||
| func (l LabelNames) Len() int { | ||||
| 	return len(l) | ||||
| } | ||||
|  | ||||
| func (l LabelNames) Less(i, j int) bool { | ||||
| 	return l[i] < l[j] | ||||
| } | ||||
|  | ||||
| func (l LabelNames) Swap(i, j int) { | ||||
| 	l[i], l[j] = l[j], l[i] | ||||
| } | ||||
|  | ||||
| func (l LabelNames) String() string { | ||||
| 	labelStrings := make([]string, 0, len(l)) | ||||
| 	for _, label := range l { | ||||
| 		labelStrings = append(labelStrings, string(label)) | ||||
| 	} | ||||
| 	return strings.Join(labelStrings, ", ") | ||||
| } | ||||
|  | ||||
| // A LabelValue is an associated value for a LabelName. | ||||
| type LabelValue string | ||||
|  | ||||
| // IsValid returns true iff the string is valid UTF-8. | ||||
| func (lv LabelValue) IsValid() bool { | ||||
| 	return utf8.ValidString(string(lv)) | ||||
| } | ||||
|  | ||||
| // LabelValues is a sortable LabelValue slice. It implements sort.Interface. | ||||
| type LabelValues []LabelValue | ||||
|  | ||||
| func (l LabelValues) Len() int { | ||||
| 	return len(l) | ||||
| } | ||||
|  | ||||
| func (l LabelValues) Less(i, j int) bool { | ||||
| 	return string(l[i]) < string(l[j]) | ||||
| } | ||||
|  | ||||
| func (l LabelValues) Swap(i, j int) { | ||||
| 	l[i], l[j] = l[j], l[i] | ||||
| } | ||||
|  | ||||
| // LabelPair pairs a name with a value. | ||||
| type LabelPair struct { | ||||
| 	Name  LabelName | ||||
| 	Value LabelValue | ||||
| } | ||||
|  | ||||
| // LabelPairs is a sortable slice of LabelPair pointers. It implements | ||||
| // sort.Interface. | ||||
| type LabelPairs []*LabelPair | ||||
|  | ||||
| func (l LabelPairs) Len() int { | ||||
| 	return len(l) | ||||
| } | ||||
|  | ||||
| func (l LabelPairs) Less(i, j int) bool { | ||||
| 	switch { | ||||
| 	case l[i].Name > l[j].Name: | ||||
| 		return false | ||||
| 	case l[i].Name < l[j].Name: | ||||
| 		return true | ||||
| 	case l[i].Value > l[j].Value: | ||||
| 		return false | ||||
| 	case l[i].Value < l[j].Value: | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (l LabelPairs) Swap(i, j int) { | ||||
| 	l[i], l[j] = l[j], l[i] | ||||
| } | ||||
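
A short sketch of the label-name validation and sorting above via github.com/prometheus/common/model; the example names are arbitrary:

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	// IsValid enforces the [a-zA-Z_][a-zA-Z0-9_]* pattern without the regexp.
	for _, n := range []model.LabelName{"job", "__name__", "0bad", "http-status"} {
		fmt.Printf("%-12s valid=%v\n", n, n.IsValid())
	}

	// LabelNames sorts lexically via sort.Interface.
	names := model.LabelNames{"instance", "job", "alertname"}
	sort.Sort(names)
	fmt.Println(names.String()) // alertname, instance, job
}
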
Some files were not shown because too many files have changed in this diff.