From a4511100f3f85f227d35d4036803cfdaf58eb46d Mon Sep 17 00:00:00 2001 From: hvi Date: Fri, 7 Jul 2017 14:48:06 +0200 Subject: [PATCH 01/16] Cobra init octopus --- octopus/LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++ octopus/cmd/root.go | 78 +++++++++++++++++ octopus/main.go | 21 +++++ 3 files changed, 301 insertions(+) create mode 100644 octopus/LICENSE create mode 100644 octopus/cmd/root.go create mode 100644 octopus/main.go diff --git a/octopus/LICENSE b/octopus/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/octopus/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/octopus/cmd/root.go b/octopus/cmd/root.go new file mode 100644 index 0000000..5a69213 --- /dev/null +++ b/octopus/cmd/root.go @@ -0,0 +1,78 @@ +// Copyright © 2017 NAME HERE +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var cfgFile string + +// RootCmd represents the base command when called without any subcommands +var RootCmd = &cobra.Command{ + Use: "octopus", + Short: "A brief description of your application", + Long: `A longer description that spans multiple lines and likely contains +examples and usage of using your application. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, +// Uncomment the following line if your bare application +// has an action associated with it: +// Run: func(cmd *cobra.Command, args []string) { }, +} + +// Execute adds all child commands to the root command sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + if err := RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} + +func init() { + cobra.OnInitialize(initConfig) + + // Here you will define your flags and configuration settings. + // Cobra supports Persistent Flags, which, if defined here, + // will be global for your application. + + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.octopus.yaml)") + // Cobra also supports local flags, which will only run + // when this action is called directly. + RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} + +// initConfig reads in config file and ENV variables if set. 
+func initConfig() { + if cfgFile != "" { // enable ability to specify config file via flag + viper.SetConfigFile(cfgFile) + } + + viper.SetConfigName(".octopus") // name of config file (without extension) + viper.AddConfigPath("$HOME") // adding home directory as first search path + viper.AutomaticEnv() // read in environment variables that match + + // If a config file is found, read it in. + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} diff --git a/octopus/main.go b/octopus/main.go new file mode 100644 index 0000000..f366693 --- /dev/null +++ b/octopus/main.go @@ -0,0 +1,21 @@ +// Copyright © 2017 NAME HERE +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import "github.com/lesfurets/git-octopus/octopus/cmd" + +func main() { + cmd.Execute() +} From 2b348350b4d7aeaf988a43b5fffcf95bb1ba52e0 Mon Sep 17 00:00:00 2001 From: hvi Date: Fri, 7 Jul 2017 14:48:38 +0200 Subject: [PATCH 02/16] Cobra add merge * This command `octopus merge` will be used to replace the old `git-octopus` command, with a more human readable syntax (ala git). --- octopus/cmd/merge.go | 54 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 octopus/cmd/merge.go diff --git a/octopus/cmd/merge.go b/octopus/cmd/merge.go new file mode 100644 index 0000000..09b21bd --- /dev/null +++ b/octopus/cmd/merge.go @@ -0,0 +1,54 @@ +// Copyright © 2017 NAME HERE +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// mergeCmd represents the merge command +var mergeCmd = &cobra.Command{ + Use: "merge", + Short: "A brief description of your command", + Long: `A longer description that spans multiple lines and likely contains examples +and usage of using your command. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + Run: merge, +} + +func merge(cmd *cobra.Command, args []string) { +// TODO: Work your own magic here +fmt.Println("merge called") +} + +func init() { + RootCmd.AddCommand(mergeCmd) + + // Here you will define your flags and configuration settings. 
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // mergeCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // mergeCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") + +} From d9d6b9d861d18aa7a093dbb974f615e5a6ccc24b Mon Sep 17 00:00:00 2001 From: hvi Date: Fri, 7 Jul 2017 15:24:25 +0200 Subject: [PATCH 03/16] Replace merge called behaviour with git-octopus's main function * passing args to `run.Run()` doesnt seem to work * `octopus merge branch1 branch2` seems to work --- octopus/cmd/merge.go | 48 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/octopus/cmd/merge.go b/octopus/cmd/merge.go index 09b21bd..302dffb 100644 --- a/octopus/cmd/merge.go +++ b/octopus/cmd/merge.go @@ -15,9 +15,12 @@ package cmd import ( - "fmt" - + "github.com/lesfurets/git-octopus/git" + "github.com/lesfurets/git-octopus/run" "github.com/spf13/cobra" + "log" + "os" + "os/signal" ) // mergeCmd represents the merge command @@ -34,8 +37,45 @@ to quickly create a Cobra application.`, } func merge(cmd *cobra.Command, args []string) { -// TODO: Work your own magic here -fmt.Println("merge called") + repo := git.Repository{Path: "."} + + context := run.OctopusContext{ + Repo: &repo, + Logger: log.New(os.Stdout, "", 0), + } + + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt, os.Kill) + + go handleSignals(signalChan, &context) + + err := run.Run(&context, os.Args[1:]...) + + if err != nil { + if len(err.Error()) > 0 { + log.Fatalln(err.Error()) + } + os.Exit(1) + } +} + +func handleSignals(signalChan chan os.Signal, context *run.OctopusContext) { + initialHeadCommit, _ := context.Repo.Git("rev-parse", "HEAD") + /* + The behavior of this is quite tricky. The signal is not only received on signalChan + but sent to subprocesses started by exec.Command as well. It is likely that + the main go routine is running one of those subprocess which will stop and return an error. + The error is handled by the Run function as any other error depending on where the execution was. + + In the mean time, this routine is resetting the repo. + + This is definitly an approximation that works in most cases. + */ + sig := <-signalChan + context.Logger.Printf("Signal %v\n", sig.String()) + context.Repo.Git("reset", "-q", "--hard", initialHeadCommit) + context.Repo.Git("clean", "-fd") + os.Exit(1) } func init() { From 0b13a14a89a6d20cc164f2d3353fcc4be9ffed9e Mon Sep 17 00:00:00 2001 From: hvi Date: Fri, 7 Jul 2017 19:00:57 +0200 Subject: [PATCH 04/16] Add flag declaration * report old config flags to the merge command --- octopus/cmd/merge.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/octopus/cmd/merge.go b/octopus/cmd/merge.go index 302dffb..5d14362 100644 --- a/octopus/cmd/merge.go +++ b/octopus/cmd/merge.go @@ -26,13 +26,9 @@ import ( // mergeCmd represents the merge command var mergeCmd = &cobra.Command{ Use: "merge", - Short: "A brief description of your command", - Long: `A longer description that spans multiple lines and likely contains examples -and usage of using your command. For example: - -Cobra is a CLI library for Go that empowers applications. 
-This application is a tool to generate the needed files -to quickly create a Cobra application.`, + Short: "A git extension to merge multiple branches", + Long: `A git extension to merge multiple branches. +TODO verbose description`, Run: merge, } @@ -81,11 +77,16 @@ func handleSignals(signalChan chan os.Signal, context *run.OctopusContext) { func init() { RootCmd.AddCommand(mergeCmd) - // Here you will define your flags and configuration settings. + // flags and configuration settings. + + // --check + mergeCmd.PersistentFlags().Bool("check", false, "checks if the merge works. Leaves the repository back to HEAD") + + // --chunk + mergeCmd.PersistentFlags().Int("chunk", 1, "do the octopus merge split by chunks of n branches") - // Cobra supports Persistent Flags which will work for this command - // and all subcommands, e.g.: - // mergeCmd.PersistentFlags().String("foo", "", "A help for foo") + // --exclude + mergeCmd.PersistentFlags().StringArray("exclude", nil, "excludes branches matching the pattern") // Cobra supports local flags which will only run when this command // is called directly, e.g.: From d34c9c0e9108e6ecc98664ed9fa17d51f57be7ff Mon Sep 17 00:00:00 2001 From: hvi Date: Fri, 7 Jul 2017 19:33:33 +0200 Subject: [PATCH 05/16] Migrate --version * remove and migrate to `octopus version` --- config/config.go | 5 +---- octopus/cmd/root.go | 30 +++++++++++------------------- octopus/cmd/version.go | 34 ++++++++++++++++++++++++++++++++++ octopus/version/version.go | 9 +++++++++ run/run.go | 9 +-------- 5 files changed, 56 insertions(+), 31 deletions(-) create mode 100644 octopus/cmd/version.go create mode 100644 octopus/version/version.go diff --git a/config/config.go b/config/config.go index 1bade56..75efd42 100644 --- a/config/config.go +++ b/config/config.go @@ -9,7 +9,6 @@ import ( ) type OctopusConfig struct { - PrintVersion bool DoCommit bool ChunkSize int ExcludedPatterns []string @@ -29,12 +28,11 @@ func (e *excluded_patterns) Set(value string) error { func GetOctopusConfig(repo *git.Repository, args []string) (*OctopusConfig, error) { - var printVersion, noCommitArg, commitArg bool + var noCommitArg, commitArg bool var chunkSizeArg int var excludedPatternsArg excluded_patterns var commandLine = flag.NewFlagSet("git-octopus", flag.ExitOnError) - commandLine.BoolVar(&printVersion, "v", false, "prints the version of git-octopus.") commandLine.BoolVar(&noCommitArg, "n", false, "leaves the repository back to HEAD.") commandLine.BoolVar(&commitArg, "c", false, "Commit the resulting merge in the current branch.") commandLine.IntVar(&chunkSizeArg, "s", 0, "do the octopus by chunk of n branches.") @@ -88,7 +86,6 @@ func GetOctopusConfig(repo *git.Repository, args []string) (*OctopusConfig, erro } return &OctopusConfig{ - PrintVersion: printVersion, DoCommit: configCommit, ChunkSize: chunkSizeArg, ExcludedPatterns: excludedPatterns, diff --git a/octopus/cmd/root.go b/octopus/cmd/root.go index 5a69213..76040ce 100644 --- a/octopus/cmd/root.go +++ b/octopus/cmd/root.go @@ -27,16 +27,11 @@ var cfgFile string // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: "octopus", - Short: "A brief description of your application", - Long: `A longer description that spans multiple lines and likely contains -examples and usage of using your application. For example: - -Cobra is a CLI library for Go that empowers applications. 
-This application is a tool to generate the needed files -to quickly create a Cobra application.`, -// Uncomment the following line if your bare application -// has an action associated with it: -// Run: func(cmd *cobra.Command, args []string) { }, + Short: "Octopus is a git toolbox", + Long: `Octopus extends the git suite, providing various utilities such as mutliple branch merges via patterns, conflict detection, conflict resolution sharing...`, + // Uncomment the following line if your bare application + // has an action associated with it: + // Run: func(cmd *cobra.Command, args []string) { }, } // Execute adds all child commands to the root command sets flags appropriately. @@ -51,25 +46,22 @@ func Execute() { func init() { cobra.OnInitialize(initConfig) - // Here you will define your flags and configuration settings. - // Cobra supports Persistent Flags, which, if defined here, - // will be global for your application. + // flags and configuration settings. + // --config RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.octopus.yaml)") - // Cobra also supports local flags, which will only run - // when this action is called directly. - RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") } // initConfig reads in config file and ENV variables if set. func initConfig() { - if cfgFile != "" { // enable ability to specify config file via flag + if cfgFile != "" { + // enable ability to specify config file via flag viper.SetConfigFile(cfgFile) } viper.SetConfigName(".octopus") // name of config file (without extension) - viper.AddConfigPath("$HOME") // adding home directory as first search path - viper.AutomaticEnv() // read in environment variables that match + viper.AddConfigPath("$HOME") // adding home directory as first search path + viper.AutomaticEnv() // read in environment variables that match // If a config file is found, read it in. if err := viper.ReadInConfig(); err == nil { diff --git a/octopus/cmd/version.go b/octopus/cmd/version.go new file mode 100644 index 0000000..c876a33 --- /dev/null +++ b/octopus/cmd/version.go @@ -0,0 +1,34 @@ +// Copyright © 2017 NAME HERE +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cmd + +import ( + "github.com/lesfurets/git-octopus/octopus/version" + "github.com/spf13/cobra" +) + +// versionCmd represents the version command +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Octopus", + Long: `Print the version number of Octopus`, + Run: func(cmd *cobra.Command, args []string) { + version.PrintOctopusVersion() + }, +} + +func init() { + RootCmd.AddCommand(versionCmd) +} diff --git a/octopus/version/version.go b/octopus/version/version.go new file mode 100644 index 0000000..911a552 --- /dev/null +++ b/octopus/version/version.go @@ -0,0 +1,9 @@ +package version + +import "fmt" + +const VERSION = "2.0" + +func PrintOctopusVersion() { + fmt.Println(VERSION) +} diff --git a/run/run.go b/run/run.go index f13ae47..6fa22ea 100644 --- a/run/run.go +++ b/run/run.go @@ -13,8 +13,6 @@ type OctopusContext struct { Logger *log.Logger } -const VERSION = "2.0" - func Run(context *OctopusContext, args ...string) error { octopusConfig, err := config.GetOctopusConfig(context.Repo, args) @@ -23,11 +21,6 @@ func Run(context *OctopusContext, args ...string) error { return err } - if octopusConfig.PrintVersion { - context.Logger.Println(VERSION) - return nil - } - if len(octopusConfig.Patterns) == 0 { context.Logger.Println("Nothing to merge. No pattern given") return nil @@ -155,6 +148,6 @@ func octopusCommitMessage(remotes []git.LsRemoteEntry) string { for _, lsRemoteEntry := range remotes { buf.WriteString(lsRemoteEntry.Ref + "\n") } - buf.WriteString("\nCommit created by git-octopus " + VERSION + ".\n") + buf.WriteString("\nCommit created by git-octopus.\n") return buf.String() } From e8c65d4fb728a14fd29806a01035f1866572d0fb Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 00:41:30 +0200 Subject: [PATCH 06/16] Migrate run package to merge * Transpose everything in /run into octopus/merge * Remove dependencies to parent package run * Squash tests to follow go test convention 1test per file * migrate config to merge (only used by the merge command) --- octopus/cmd/merge.go | 12 +- octopus/merge/config.go | 94 ++++++++++++++ octopus/merge/config_test.go | 148 +++++++++++++++++++++ octopus/merge/matcher.go | 57 +++++++++ octopus/merge/matcher_test.go | 67 ++++++++++ octopus/merge/merge.go | 143 +++++++++++++++++++++ octopus/merge/merge_test.go | 234 ++++++++++++++++++++++++++++++++++ 7 files changed, 749 insertions(+), 6 deletions(-) create mode 100644 octopus/merge/config.go create mode 100644 octopus/merge/config_test.go create mode 100644 octopus/merge/matcher.go create mode 100644 octopus/merge/matcher_test.go create mode 100644 octopus/merge/merge.go create mode 100644 octopus/merge/merge_test.go diff --git a/octopus/cmd/merge.go b/octopus/cmd/merge.go index 5d14362..1a5ff97 100644 --- a/octopus/cmd/merge.go +++ b/octopus/cmd/merge.go @@ -16,7 +16,7 @@ package cmd import ( "github.com/lesfurets/git-octopus/git" - "github.com/lesfurets/git-octopus/run" + "github.com/lesfurets/git-octopus/octopus/merge" "github.com/spf13/cobra" "log" "os" @@ -29,13 +29,13 @@ var mergeCmd = &cobra.Command{ Short: "A git extension to merge multiple branches", Long: `A git extension to merge multiple branches. 
TODO verbose description`, - Run: merge, + Run: runMergeCmd, } -func merge(cmd *cobra.Command, args []string) { +func runMergeCmd(cmd *cobra.Command, args []string) { repo := git.Repository{Path: "."} - context := run.OctopusContext{ + context := merge.MergeContext{ Repo: &repo, Logger: log.New(os.Stdout, "", 0), } @@ -45,7 +45,7 @@ func merge(cmd *cobra.Command, args []string) { go handleSignals(signalChan, &context) - err := run.Run(&context, os.Args[1:]...) + err := merge.Merge(&context, args) if err != nil { if len(err.Error()) > 0 { @@ -55,7 +55,7 @@ func merge(cmd *cobra.Command, args []string) { } } -func handleSignals(signalChan chan os.Signal, context *run.OctopusContext) { +func handleSignals(signalChan chan os.Signal, context *merge.MergeContext) { initialHeadCommit, _ := context.Repo.Git("rev-parse", "HEAD") /* The behavior of this is quite tricky. The signal is not only received on signalChan diff --git a/octopus/merge/config.go b/octopus/merge/config.go new file mode 100644 index 0000000..2af1972 --- /dev/null +++ b/octopus/merge/config.go @@ -0,0 +1,94 @@ +package merge + +import ( + "errors" + "flag" + "github.com/lesfurets/git-octopus/git" + "strconv" + "strings" +) + +type Config struct { + DoCommit bool + ChunkSize int + ExcludedPatterns []string + Patterns []string +} + +type excluded_patterns []string + +func (e *excluded_patterns) String() string { + return strings.Join(*e, ",") +} + +func (e *excluded_patterns) Set(value string) error { + *e = append(*e, value) + return nil +} + +func GetConfig(repo *git.Repository, args []string) (*Config, error) { + + var noCommitArg, commitArg bool + var chunkSizeArg int + var excludedPatternsArg excluded_patterns + + var commandLine = flag.NewFlagSet("git-octopus", flag.ExitOnError) + commandLine.BoolVar(&noCommitArg, "n", false, "leaves the repository back to HEAD.") + commandLine.BoolVar(&commitArg, "c", false, "Commit the resulting merge in the current branch.") + commandLine.IntVar(&chunkSizeArg, "s", 0, "do the octopus by chunk of n branches.") + commandLine.Var(&excludedPatternsArg, "e", "exclude branches matching the pattern.") + + commandLine.Parse(args) + + var configCommit bool + + rawConfigCommit, err := repo.Git("config", "octopus.commit") + + if err != nil { + configCommit = true + } else { + configCommit, err = strconv.ParseBool(rawConfigCommit) + if err != nil { + return nil, errors.New("Config octopus.commit should be boolean. 
Given \"" + rawConfigCommit + "\"") + } + } + + if commitArg { + configCommit = true + } + + if noCommitArg { + configCommit = false + } + + configExcludedPatterns, _ := repo.Git("config", "--get-all", "octopus.excludePattern") + + var excludedPatterns []string + + if len(configExcludedPatterns) > 0 { + excludedPatterns = strings.Split(configExcludedPatterns, "\n") + } + + if len(excludedPatternsArg) > 0 { + excludedPatterns = excludedPatternsArg + } + + configPatterns, _ := repo.Git("config", "--get-all", "octopus.pattern") + + var patterns []string + + if len(configPatterns) > 0 { + patterns = strings.Split(configPatterns, "\n") + } + + if commandLine.NArg() > 0 { + patterns = commandLine.Args() + } + + return &Config{ + DoCommit: configCommit, + ChunkSize: chunkSizeArg, + ExcludedPatterns: excludedPatterns, + Patterns: patterns, + }, nil +} diff --git a/octopus/merge/config_test.go b/octopus/merge/config_test.go new file mode 100644 index 0000000..638510a --- /dev/null +++ b/octopus/merge/config_test.go @@ -0,0 +1,148 @@ +package merge + +import ( + "github.com/lesfurets/git-octopus/git" + "github.com/lesfurets/git-octopus/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func createTestRepo() *git.Repository { + dir := test.CreateTempDir() + + repo := git.Repository{Path: dir} + + repo.Git("init") + + return &repo +} + +func TestDoCommit(t *testing.T) { + repo := createTestRepo() + defer test.Cleanup(repo) + + // GIVEN no config, no option + // WHEN + octopusConfig, err := GetConfig(repo, nil) + + // THEN doCommit should be true + assert.True(t, octopusConfig.DoCommit) + assert.Nil(t, err) + + // GIVEN config to false, no option + repo.Git("config", "octopus.commit", "false") + // WHEN + octopusConfig, err = GetConfig(repo, nil) + + // THEN doCommit should be false + assert.False(t, octopusConfig.DoCommit) + assert.Nil(t, err) + + // Config to 0, no option. 
doCommit should be true + repo.Git("config", "octopus.commit", "0") + octopusConfig, err = GetConfig(repo, nil) + + assert.False(t, octopusConfig.DoCommit) + assert.Nil(t, err) + + // GIVEN config to false, -c option true + repo.Git("config", "octopus.commit", "false") + // WHEN + octopusConfig, err = GetConfig(repo, []string{"-c"}) + + // THEN doCommit should be true + assert.True(t, octopusConfig.DoCommit) + assert.Nil(t, err) + + // GIVEN config to true, -n option true + repo.Git("config", "octopus.commit", "true") + // WHEN + octopusConfig, err = GetConfig(repo, []string{"-n"}) + + // THEN doCommit should be false + assert.False(t, octopusConfig.DoCommit) + assert.Nil(t, err) +} + +func TestChunkMode(t *testing.T) { + repo := createTestRepo() + defer test.Cleanup(repo) + + // GIVEN No option + // WHEN + octopusConfig, err := GetConfig(repo, nil) + + // THEN chunkSize should be 0 + assert.Equal(t, 0, octopusConfig.ChunkSize) + assert.Nil(t, err) + + // GIVEN option -s 5 + // WHEN + octopusConfig, err = GetConfig(repo, []string{"-s", "5"}) + + // THEN chunkSize should be 5 + assert.Equal(t, 5, octopusConfig.ChunkSize) + assert.Nil(t, err) +} + +func TestExcludedPatterns(t *testing.T) { + repo := createTestRepo() + defer test.Cleanup(repo) + + // GIVEN no config, no option + // WHEN + octopusConfig, err := GetConfig(repo, nil) + + // THEN excludedPatterns should be empty + assert.Empty(t, octopusConfig.ExcludedPatterns) + assert.Nil(t, err) + + // GIVEN excludePattern config, no option + repo.Git("config", "octopus.excludePattern", "excluded/*") + repo.Git("config", "--add", "octopus.excludePattern", "excluded_branch") + // WHEN + octopusConfig, err = GetConfig(repo, nil) + + // THEN excludedPatterns should be set + assert.Equal(t, []string{"excluded/*", "excluded_branch"}, octopusConfig.ExcludedPatterns) + assert.Nil(t, err) + + // GIVEN excludePattern config (from previous assertion), option given + // WHEN + octopusConfig, err = GetConfig(repo, []string{"-e", "override_excluded"}) + + // THEN option should take precedence + assert.Equal(t, []string{"override_excluded"}, octopusConfig.ExcludedPatterns) + assert.Nil(t, err) +} + +func TestPatterns(t *testing.T) { + repo := createTestRepo() + defer test.Cleanup(repo) + + // GIVEN no config, no option + // WHEN + octopusConfig, err := GetConfig(repo, nil) + + // THEN excludedPatterns should be empty + assert.Empty(t, octopusConfig.Patterns) + assert.Nil(t, err) + + // GIVEN config, no argument. + repo.Git("config", "octopus.pattern", "test") + repo.Git("config", "--add", "octopus.pattern", "test2") + // WHEN + octopusConfig, err = GetConfig(repo, nil) + + // THEN patterns should be set + assert.Equal(t, []string{"test", "test2"}, octopusConfig.Patterns) + assert.Nil(t, err) + + // GIVEN config (from previous assertion), argument given + // WHEN + octopusConfig, err = GetConfig(repo, []string{"arg1", "arg2"}) + + // THEN arguments should take precedence + assert.Equal(t, []string{"arg1", "arg2"}, octopusConfig.Patterns) + assert.Nil(t, err) +} diff --git a/octopus/merge/matcher.go b/octopus/merge/matcher.go new file mode 100644 index 0000000..7c9e4eb --- /dev/null +++ b/octopus/merge/matcher.go @@ -0,0 +1,57 @@ +package merge + +import ( + "bytes" + "fmt" + "github.com/lesfurets/git-octopus/git" + "log" + "strings" +) + +func resolveBranchList(repo *git.Repository, logger *log.Logger, patterns []string, excludedPatterns []string) []git.LsRemoteEntry { + lsRemote, _ := repo.Git(append([]string{"ls-remote", "."}, patterns...)...) 
+ includedRefs := git.ParseLsRemote(lsRemote) + excludedRefs := []git.LsRemoteEntry{} + + totalCount := len(includedRefs) + excludedCount := 0 + + if len(excludedPatterns) > 0 { + lsRemote, _ = repo.Git(append([]string{"ls-remote", "."}, excludedPatterns...)...) + excludedRefs = git.ParseLsRemote(lsRemote) + } + + tempBuffer := bytes.NewBufferString("") + + if totalCount == 0 { + tempBuffer.WriteString(fmt.Sprintf("No branch matching \"%v\" were found\n", strings.Join(patterns, " "))) + } + + result := []git.LsRemoteEntry{} + + for _, lsRemoteEntry := range includedRefs { + excluded := false + for _, excl := range excludedRefs { + if excl.Ref == lsRemoteEntry.Ref { + excludedCount++ + excluded = true + break + } + } + + if excluded { + tempBuffer.WriteString("E ") + } else { + tempBuffer.WriteString("I ") + result = append(result, lsRemoteEntry) + } + tempBuffer.WriteString(lsRemoteEntry.Ref + "\n") + } + + count := len(result) + + logger.Printf("%v branches (I)ncluded (%v matching, %v (E)xcluded):\n", count, totalCount, excludedCount) + logger.Print(tempBuffer.String()) + + return result +} diff --git a/octopus/merge/matcher_test.go b/octopus/merge/matcher_test.go new file mode 100644 index 0000000..900f8a4 --- /dev/null +++ b/octopus/merge/matcher_test.go @@ -0,0 +1,67 @@ +package merge + +import ( + "bytes" + "github.com/lesfurets/git-octopus/git" + "github.com/lesfurets/git-octopus/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func setupRepo() (*MergeContext, *bytes.Buffer) { + context, out := CreateTestContext() + head, _ := context.Repo.Git("rev-parse", "HEAD") + context.Repo.Git("update-ref", "refs/heads/test1", head) + context.Repo.Git("update-ref", "refs/remotes/origin/test1", head) + context.Repo.Git("update-ref", "refs/remotes/origin/test2", head) + context.Repo.Git("update-ref", "refs/remotes/origin/test3", head) + + return context, out +} + +func TestResolveBranchListSimple(t *testing.T) { + context, out := setupRepo() + defer test.Cleanup(context.Repo) + + head, _ := context.Repo.Git("rev-parse", "HEAD") + + branchList := resolveBranchList(context.Repo, context.Logger, []string{"refs/heads/*"}, nil) + + expected := []git.LsRemoteEntry{ + {Ref: "refs/heads/master", Sha1: head}, + {Ref: "refs/heads/test1", Sha1: head}, + } + + assert.Equal(t, expected, branchList) + outputString := out.String() + assert.Contains(t, outputString, + "2 branches (I)ncluded (2 matching, 0 (E)xcluded):\n"+ + "I refs/heads/master\n"+ + "I refs/heads/test1\n") +} + +func TestResolveBranchListExclusion(t *testing.T) { + context, out := setupRepo() + defer test.Cleanup(context.Repo) + + head, _ := context.Repo.Git("rev-parse", "HEAD") + + branchList := resolveBranchList(context.Repo, context.Logger, []string{"refs/heads/*", "remotes/origin/*"}, []string{"*/test1"}) + + expected := []git.LsRemoteEntry{ + {Ref: "refs/heads/master", Sha1: head}, + {Ref: "refs/remotes/origin/test2", Sha1: head}, + {Ref: "refs/remotes/origin/test3", Sha1: head}, + } + + assert.Equal(t, expected, branchList) + + outputString := out.String() + assert.Contains(t, outputString, + "3 branches (I)ncluded (5 matching, 2 (E)xcluded):\n"+ + "I refs/heads/master\n"+ + "E refs/heads/test1\n"+ + "E refs/remotes/origin/test1\n"+ + "I refs/remotes/origin/test2\n"+ + "I refs/remotes/origin/test3\n") +} diff --git a/octopus/merge/merge.go b/octopus/merge/merge.go new file mode 100644 index 0000000..cc50d86 --- /dev/null +++ b/octopus/merge/merge.go @@ -0,0 +1,143 @@ +package merge + +import ( + "bytes" + "errors" + 
"github.com/lesfurets/git-octopus/git" + "log" +) + +type MergeContext struct { + Repo *git.Repository + Logger *log.Logger +} + +func Merge(context *MergeContext, args []string) error { + + octopusConfig, err := GetConfig(context.Repo, args) + + if err != nil { + return err + } + + if len(octopusConfig.Patterns) == 0 { + context.Logger.Println("Nothing to merge. No pattern given") + return nil + } + + status, _ := context.Repo.Git("status", "--porcelain") + + // This is not formally required but it would be an ambiguous behaviour to let git-octopus run on unclean state. + if len(status) != 0 { + return errors.New("The repository has to be clean.") + } + + branchList := resolveBranchList(context.Repo, context.Logger, octopusConfig.Patterns, octopusConfig.ExcludedPatterns) + + if len(branchList) == 0 { + return nil + } + + initialHeadCommit, _ := context.Repo.Git("rev-parse", "HEAD") + + context.Logger.Println() + + parents, err := mergeHeads(context, branchList) + + if !octopusConfig.DoCommit { + context.Repo.Git("reset", "-q", "--hard", initialHeadCommit) + } + + if err != nil { + return err + } + + // parents always contains HEAD. We need at lease 2 parents to create a merge commit + if octopusConfig.DoCommit && parents != nil && len(parents) > 1 { + tree, _ := context.Repo.Git("write-tree") + args := []string{"commit-tree"} + for _, parent := range parents { + args = append(args, "-p", parent) + } + args = append(args, "-m", octopusCommitMessage(branchList), tree) + commit, _ := context.Repo.Git(args...) + context.Repo.Git("update-ref", "HEAD", commit) + } + + return nil +} + +// The logic of this function is copied directly from git-merge-octopus.sh +func mergeHeads(context *MergeContext, remotes []git.LsRemoteEntry) ([]string, error) { + head, _ := context.Repo.Git("rev-parse", "--verify", "-q", "HEAD") + + mrc := []string{head} + mrt, _ := context.Repo.Git("write-tree") + nonFfMerge := false + + for _, lsRemoteEntry := range remotes { + + common, err := context.Repo.Git(append([]string{"merge-base", "--all", lsRemoteEntry.Sha1}, mrc...)...) 
+ + if err != nil { + return nil, errors.New("Unable to find common commit with " + lsRemoteEntry.Ref) + } + + if common == lsRemoteEntry.Sha1 { + context.Logger.Println("Already up-to-date with " + lsRemoteEntry.Ref) + continue + } + + if len(mrc) == 1 && common == mrc[0] && !nonFfMerge { + context.Logger.Println("Fast-forwarding to: " + lsRemoteEntry.Ref) + _, err := context.Repo.Git("read-tree", "-u", "-m", head, lsRemoteEntry.Sha1) + + if err != nil { + return nil, nil + } + + mrc[0] = lsRemoteEntry.Sha1 + mrt, _ = context.Repo.Git("write-tree") + continue + } + + nonFfMerge = true + + context.Logger.Println("Trying simple merge with " + lsRemoteEntry.Ref) + + _, err = context.Repo.Git("read-tree", "-u", "-m", "--aggressive", common, mrt, lsRemoteEntry.Sha1) + + if err != nil { + return nil, err + } + + next, err := context.Repo.Git("write-tree") + + if err != nil { + context.Logger.Println("Simple merge did not work, trying automatic merge.") + _, err = context.Repo.Git("merge-index", "-o", "git-merge-one-file", "-a") + + if err != nil { + context.Logger.Println("Automated merge did not work.") + context.Logger.Println("Should not be doing an Octopus.") + return nil, errors.New("") + } + + next, _ = context.Repo.Git("write-tree") + } + + mrc = append(mrc, lsRemoteEntry.Sha1) + mrt = next + } + + return mrc, nil +} + +func octopusCommitMessage(remotes []git.LsRemoteEntry) string { + buf := bytes.NewBufferString("Merged branches:\n") + for _, lsRemoteEntry := range remotes { + buf.WriteString(lsRemoteEntry.Ref + "\n") + } + buf.WriteString("\nCommit created by git-octopus.\n") + return buf.String() +} diff --git a/octopus/merge/merge_test.go b/octopus/merge/merge_test.go new file mode 100644 index 0000000..9f65d14 --- /dev/null +++ b/octopus/merge/merge_test.go @@ -0,0 +1,234 @@ +package merge + +import ( + "bytes" + "fmt" + "github.com/lesfurets/git-octopus/git" + "github.com/lesfurets/git-octopus/test" + "github.com/stretchr/testify/assert" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "testing" +) + +func CreateTestContext() (*MergeContext, *bytes.Buffer) { + dir := test.CreateTempDir() + + repo := git.Repository{Path: dir} + + repo.Git("init") + repo.Git("config", "user.name", "gotest") + repo.Git("config", "user.email", "gotest@golang.com") + _, err := repo.Git("commit", "--allow-empty", "-m\"first commit\"") + + if err != nil { + fmt.Println("There's something wrong with the git installation:") + fmt.Println(err.Error()) + } + + out := bytes.NewBufferString("") + + context := MergeContext{ + Repo: &repo, + Logger: log.New(out, "", 0), + } + + return &context, out +} + +func writeFile(repo *git.Repository, name string, lines ...string) { + fileName := filepath.Join(repo.Path, name) + ioutil.WriteFile(fileName, []byte(strings.Join(lines, "\n")), 0644) +} + +// Basic merge of 3 branches. 
Asserts the resulting tree and the merge commit +func TestOctopus3branches(t *testing.T) { + context, _ := CreateTestContext() + repo := context.Repo + defer test.Cleanup(repo) + + // Create and commit file foo1 in branch1 + repo.Git("checkout", "-b", "branch1") + writeFile(repo, "foo1", "First line") + repo.Git("add", "foo1") + repo.Git("commit", "-m\"\"") + + // Create and commit file foo2 in branch2 + repo.Git("checkout", "-b", "branch2", "master") + writeFile(repo, "foo2", "First line") + repo.Git("add", "foo2") + repo.Git("commit", "-m\"\"") + + // Create and commit file foo3 in branch3 + repo.Git("checkout", "-b", "branch3", "master") + writeFile(repo, "foo3", "First line") + repo.Git("add", "foo3") + repo.Git("commit", "-m\"\"") + + // Merge the 3 branches in a new octopus branch + repo.Git("checkout", "-b", "octopus", "master") + + err := Merge(context, "branch*") + assert.Nil(t, err) + + // The working tree should have the 3 files and status should be clean + _, err = os.Open(filepath.Join(repo.Path, "foo1")) + assert.Nil(t, err) + _, err = os.Open(filepath.Join(repo.Path, "foo2")) + assert.Nil(t, err) + _, err = os.Open(filepath.Join(repo.Path, "foo3")) + assert.Nil(t, err) + + status, _ := repo.Git("status", "--porcelain") + assert.Empty(t, status) + + // octopus branch should contain the 3 branches + _, err = repo.Git("merge-base", "--is-ancestor", "branch1", "octopus") + assert.Nil(t, err) + _, err = repo.Git("merge-base", "--is-ancestor", "branch2", "octopus") + assert.Nil(t, err) + _, err = repo.Git("merge-base", "--is-ancestor", "branch3", "octopus") + assert.Nil(t, err) + + // Assert the commit message + commitMessage, _ := repo.Git("show", "--pretty=format:%B") // gets the commit body only + + assert.Contains(t, commitMessage, + "Merged branches:\n"+ + "refs/heads/branch1\n"+ + "refs/heads/branch2\n"+ + "refs/heads/branch3\n"+ + "\nCommit created by git-octopus.") +} + +func TestOctopusCommitConfigError(t *testing.T) { + context, _ := CreateTestContext() + defer test.Cleanup(context.Repo) + + context.Repo.Git("config", "octopus.commit", "bad_value") + + err := Merge(context, "-v") + + assert.NotNil(t, err) +} + +func TestOctopusNoPatternGiven(t *testing.T) { + context, out := CreateTestContext() + defer test.Cleanup(context.Repo) + + Merge(context) + + assert.Equal(t, "Nothing to merge. No pattern given\n", out.String()) +} + +func TestOctopusNoBranchMatching(t *testing.T) { + context, out := CreateTestContext() + defer test.Cleanup(context.Repo) + + Merge(context, "refs/remotes/dumb/*", "refs/remotes/dumber/*") + + assert.Contains(t, out.String(), "No branch matching \"refs/remotes/dumb/* refs/remotes/dumber/*\" were found\n") +} + +// Merge a branch that is already merged. +// Should be noop and print something accordingly +func TestOctopusAlreadyUpToDate(t *testing.T) { + context, out := CreateTestContext() + defer test.Cleanup(context.Repo) + + // commit a file in master + writeFile(context.Repo, "foo", "First line") + context.Repo.Git("add", "foo") + context.Repo.Git("commit", "-m\"first commit\"") + + // Create a branch on this first commit. 
+ // master and outdated_branch are on the same commit + context.Repo.Git("branch", "outdated_branch") + + expected, _ := context.Repo.Git("rev-parse", "HEAD") + + err := Merge(context, "outdated_branch") + + actual, _ := context.Repo.Git("rev-parse", "HEAD") + + // HEAD should point to the same commit + assert.Equal(t, expected, actual) + + // This is a normal behavious, no error should be raised + assert.Nil(t, err) + + assert.Contains(t, out.String(), "Already up-to-date with refs/heads/outdated_branch") +} + +// git-octopus should prevent from running if status is not clean +func TestUncleanStateFail(t *testing.T) { + context, _ := CreateTestContext() + defer test.Cleanup(context.Repo) + + // create and commit a file + writeFile(context.Repo, "foo", "First line") + + err := Merge(context, "*") + + if assert.NotNil(t, err) { + assert.Contains(t, err.Error(), "The repository has to be clean.") + } +} + +func TestFastForward(t *testing.T) { + context, _ := CreateTestContext() + repo := context.Repo + defer test.Cleanup(repo) + + // The repo is on master branch with an empty tree + // Create a branch with a new file + repo.Git("checkout", "-b", "new_branch") + writeFile(repo, "foo", "bar") + repo.Git("add", "foo") + repo.Git("commit", "-m", "added foo") + + repo.Git("checkout", "master") + + expected, _ := repo.Git("rev-parse", "HEAD") + + Merge(context, "-n", "new_branch") + + actual, _ := repo.Git("rev-parse", "HEAD") + assert.Equal(t, expected, actual) + + status, _ := repo.Git("status", "--porcelain") + assert.Empty(t, status) +} + +func TestConflictState(t *testing.T) { + context, _ := CreateTestContext() + repo := context.Repo + defer test.Cleanup(repo) + + writeFile(repo, "foo", "line 1", "") + repo.Git("add", ".") + repo.Git("commit", "-m", "added foo") + + writeFile(repo, "foo", "line 1", "line 2") + repo.Git("commit", "-a", "-m", "edited foo") + + repo.Git("checkout", "-b", "a_branch", "HEAD^") + + writeFile(repo, "foo", "line 1", "line 2 bis") + repo.Git("commit", "-a", "-m", "edited foo in parallel to master") + + repo.Git("checkout", "master") + expected, _ := repo.Git("rev-parse", "HEAD") + + err := Merge(context, "-n", "a_branch") + + assert.NotNil(t, err) + actual, _ := repo.Git("rev-parse", "HEAD") + assert.Equal(t, expected, actual) + + status, _ := repo.Git("status", "--porcelain") + assert.Empty(t, status) +} From 089b0b239c4f171a7f0f8d084ca963c620242f7f Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 13:37:41 +0200 Subject: [PATCH 07/16] Migrate config file to viper * Make flags behave like gitconfigs and git flags usually work together If not set has a default value, overrided by config file, and lastly overrided by the usage of the flag * The new config file is $HOME/.octopus.yaml If not found, checks ./.octopus.yaml * config file can be passed via the flag `--config myconfigfile` --- octopus/cmd/merge.go | 8 +- octopus/cmd/root.go | 1 + octopus/merge/config.go | 85 +++----------------- octopus/merge/config_test.go | 146 ++++++----------------------------- octopus/merge/merge.go | 2 +- octopus/merge/merge_test.go | 25 ++---- 6 files changed, 48 insertions(+), 219 deletions(-) diff --git a/octopus/cmd/merge.go b/octopus/cmd/merge.go index 1a5ff97..0ae908f 100644 --- a/octopus/cmd/merge.go +++ b/octopus/cmd/merge.go @@ -18,6 +18,7 @@ import ( "github.com/lesfurets/git-octopus/git" "github.com/lesfurets/git-octopus/octopus/merge" "github.com/spf13/cobra" + "github.com/spf13/viper" "log" "os" "os/signal" @@ -81,12 +82,15 @@ func init() { // --check 
mergeCmd.PersistentFlags().Bool("check", false, "checks if the merge works. Leaves the repository back to HEAD") + viper.BindPFlag("check", mergeCmd.PersistentFlags().Lookup("check")) // --chunk - mergeCmd.PersistentFlags().Int("chunk", 1, "do the octopus merge split by chunks of n branches") + mergeCmd.PersistentFlags().Int("chunk", 0, "do the octopus merge split by chunks of n branches") + viper.BindPFlag("chunk", mergeCmd.PersistentFlags().Lookup("chunk")) // --exclude - mergeCmd.PersistentFlags().StringArray("exclude", nil, "excludes branches matching the pattern") + mergeCmd.PersistentFlags().StringSlice("exclude", nil, "excludes branches matching the pattern") + viper.BindPFlag("exclude", mergeCmd.PersistentFlags().Lookup("exclude")) // Cobra supports local flags which will only run when this command // is called directly, e.g.: diff --git a/octopus/cmd/root.go b/octopus/cmd/root.go index 76040ce..07ef0e6 100644 --- a/octopus/cmd/root.go +++ b/octopus/cmd/root.go @@ -61,6 +61,7 @@ func initConfig() { viper.SetConfigName(".octopus") // name of config file (without extension) viper.AddConfigPath("$HOME") // adding home directory as first search path + viper.AddConfigPath(".") // adding current directory as second search path viper.AutomaticEnv() // read in environment variables that match // If a config file is found, read it in. diff --git a/octopus/merge/config.go b/octopus/merge/config.go index 2af1972..3f6f7e0 100644 --- a/octopus/merge/config.go +++ b/octopus/merge/config.go @@ -1,11 +1,7 @@ package merge import ( - "errors" - "flag" - "github.com/lesfurets/git-octopus/git" - "strconv" - "strings" + "github.com/spf13/viper" ) type Config struct { @@ -15,79 +11,16 @@ type Config struct { Patterns []string } -type excluded_patterns []string - -func (e *excluded_patterns) String() string { - return strings.Join(*e, ",") -} - -func (e *excluded_patterns) Set(value string) error { - *e = append(*e, value) - return nil -} - -func GetConfig(repo *git.Repository, args []string) (*Config, error) { - - var noCommitArg, commitArg bool - var chunkSizeArg int - var excludedPatternsArg excluded_patterns - - var commandLine = flag.NewFlagSet("git-octopus", flag.ExitOnError) - commandLine.BoolVar(&noCommitArg, "n", false, "leaves the repository back to HEAD.") - commandLine.BoolVar(&commitArg, "c", false, "Commit the resulting merge in the current branch.") - commandLine.IntVar(&chunkSizeArg, "s", 0, "do the octopus by chunk of n branches.") - commandLine.Var(&excludedPatternsArg, "e", "exclude branches matching the pattern.") - - commandLine.Parse(args) - - var configCommit bool - - rawConfigCommit, err := repo.Git("config", "octopus.commit") - - if err != nil { - configCommit = true - } else { - configCommit, err = strconv.ParseBool(rawConfigCommit) - if err != nil { - return nil, errors.New("Config octopus.commit should be boolean. 
Given \"" + rawConfigCommit + "\"") - } - } - - if commitArg { - configCommit = true - } - - if noCommitArg { - configCommit = false - } - - configExcludedPatterns, _ := repo.Git("config", "--get-all", "octopus.excludePattern") - - var excludedPatterns []string - - if len(configExcludedPatterns) > 0 { - excludedPatterns = strings.Split(configExcludedPatterns, "\n") - } - - if len(excludedPatternsArg) > 0 { - excludedPatterns = excludedPatternsArg - } - - configPatterns, _ := repo.Git("config", "--get-all", "octopus.pattern") - - var patterns []string - - if len(configPatterns) > 0 { - patterns = strings.Split(configPatterns, "\n") - } - - if commandLine.NArg() > 0 { - patterns = commandLine.Args() - } +func GetConfig(args []string) (*Config, error) { + var check = viper.GetBool("check") + var chunk = viper.GetInt("chunk") + var excludedPatterns = viper.GetStringSlice("exclude") + var patterns = args return &Config{ - DoCommit: configCommit, - ChunkSize: chunkSizeArg, + //TODO reverse DoCommit to NoCommit to simplify logic + DoCommit: !check, + ChunkSize: chunk, ExcludedPatterns: excludedPatterns, Patterns: patterns, }, nil diff --git a/octopus/merge/config_test.go b/octopus/merge/config_test.go index 638510a..30c1398 100644 --- a/octopus/merge/config_test.go +++ b/octopus/merge/config_test.go @@ -1,148 +1,50 @@ package merge import ( - "github.com/lesfurets/git-octopus/git" - "github.com/lesfurets/git-octopus/test" "github.com/stretchr/testify/assert" "testing" ) -func createTestRepo() *git.Repository { - dir := test.CreateTempDir() - - repo := git.Repository{Path: dir} - - repo.Git("init") - - return &repo -} - func TestDoCommit(t *testing.T) { - repo := createTestRepo() - defer test.Cleanup(repo) - - // GIVEN no config, no option - // WHEN - octopusConfig, err := GetConfig(repo, nil) - - // THEN doCommit should be true - assert.True(t, octopusConfig.DoCommit) - assert.Nil(t, err) - - // GIVEN config to false, no option - repo.Git("config", "octopus.commit", "false") - // WHEN - octopusConfig, err = GetConfig(repo, nil) - - // THEN doCommit should be false - assert.False(t, octopusConfig.DoCommit) - assert.Nil(t, err) - - // Config to 0, no option. 
doCommit should be true - repo.Git("config", "octopus.commit", "0") - octopusConfig, err = GetConfig(repo, nil) - - assert.False(t, octopusConfig.DoCommit) - assert.Nil(t, err) - - // GIVEN config to false, -c option true - repo.Git("config", "octopus.commit", "false") - // WHEN - octopusConfig, err = GetConfig(repo, []string{"-c"}) + //given + args := []string{"a", "b", "c"} - // THEN doCommit should be true - assert.True(t, octopusConfig.DoCommit) - assert.Nil(t, err) + //when + cfg, _ := GetConfig(args) - // GIVEN config to true, -n option true - repo.Git("config", "octopus.commit", "true") - // WHEN - octopusConfig, err = GetConfig(repo, []string{"-n"}) - - // THEN doCommit should be false - assert.False(t, octopusConfig.DoCommit) - assert.Nil(t, err) + //then + assert.Equal(t, cfg.DoCommit, true) } func TestChunkMode(t *testing.T) { - repo := createTestRepo() - defer test.Cleanup(repo) - - // GIVEN No option - // WHEN - octopusConfig, err := GetConfig(repo, nil) - - // THEN chunkSize should be 0 - assert.Equal(t, 0, octopusConfig.ChunkSize) - assert.Nil(t, err) + //given + args := []string{"a", "b", "c"} - // GIVEN option -s 5 - // WHEN - octopusConfig, err = GetConfig(repo, []string{"-s", "5"}) + //when + cfg, _ := GetConfig(args) - // THEN chunkSize should be 5 - assert.Equal(t, 5, octopusConfig.ChunkSize) - assert.Nil(t, err) + //then + assert.Equal(t, cfg.ChunkSize, 0) } func TestExcludedPatterns(t *testing.T) { - repo := createTestRepo() - defer test.Cleanup(repo) - - // GIVEN no config, no option - // WHEN - octopusConfig, err := GetConfig(repo, nil) - - // THEN excludedPatterns should be empty - assert.Empty(t, octopusConfig.ExcludedPatterns) - assert.Nil(t, err) - - // GIVEN excludePattern config, no option - repo.Git("config", "octopus.excludePattern", "excluded/*") - repo.Git("config", "--add", "octopus.excludePattern", "excluded_branch") - // WHEN - octopusConfig, err = GetConfig(repo, nil) + //given + args := []string{"a", "b", "c"} - // THEN excludedPatterns should be set - assert.Equal(t, []string{"excluded/*", "excluded_branch"}, octopusConfig.ExcludedPatterns) - assert.Nil(t, err) + //when + cfg, _ := GetConfig(args) - // GIVEN excludePattern config (from previous assertion), option given - // WHEN - octopusConfig, err = GetConfig(repo, []string{"-e", "override_excluded"}) - - // THEN option should take precedence - assert.Equal(t, []string{"override_excluded"}, octopusConfig.ExcludedPatterns) - assert.Nil(t, err) + //then + assert.Empty(t, cfg.ExcludedPatterns) } func TestPatterns(t *testing.T) { - repo := createTestRepo() - defer test.Cleanup(repo) - - // GIVEN no config, no option - // WHEN - octopusConfig, err := GetConfig(repo, nil) - - // THEN excludedPatterns should be empty - assert.Empty(t, octopusConfig.Patterns) - assert.Nil(t, err) - - // GIVEN config, no argument. 
- repo.Git("config", "octopus.pattern", "test") - repo.Git("config", "--add", "octopus.pattern", "test2") - // WHEN - octopusConfig, err = GetConfig(repo, nil) - - // THEN patterns should be set - assert.Equal(t, []string{"test", "test2"}, octopusConfig.Patterns) - assert.Nil(t, err) + //given + args := []string{"a", "b", "c"} - // GIVEN config (from previous assertion), argument given - // WHEN - octopusConfig, err = GetConfig(repo, []string{"arg1", "arg2"}) + //when + cfg, _ := GetConfig(args) - // THEN arguments should take precedence - assert.Equal(t, []string{"arg1", "arg2"}, octopusConfig.Patterns) - assert.Nil(t, err) + //then + assert.Equal(t, cfg.Patterns, args) } diff --git a/octopus/merge/merge.go b/octopus/merge/merge.go index cc50d86..811d7dc 100644 --- a/octopus/merge/merge.go +++ b/octopus/merge/merge.go @@ -14,7 +14,7 @@ type MergeContext struct { func Merge(context *MergeContext, args []string) error { - octopusConfig, err := GetConfig(context.Repo, args) + octopusConfig, err := GetConfig(args) if err != nil { return err diff --git a/octopus/merge/merge_test.go b/octopus/merge/merge_test.go index 9f65d14..d37f8c1 100644 --- a/octopus/merge/merge_test.go +++ b/octopus/merge/merge_test.go @@ -71,7 +71,7 @@ func TestOctopus3branches(t *testing.T) { // Merge the 3 branches in a new octopus branch repo.Git("checkout", "-b", "octopus", "master") - err := Merge(context, "branch*") + err := Merge(context, []string{"branch*"}) assert.Nil(t, err) // The working tree should have the 3 files and status should be clean @@ -104,22 +104,11 @@ func TestOctopus3branches(t *testing.T) { "\nCommit created by git-octopus.") } -func TestOctopusCommitConfigError(t *testing.T) { - context, _ := CreateTestContext() - defer test.Cleanup(context.Repo) - - context.Repo.Git("config", "octopus.commit", "bad_value") - - err := Merge(context, "-v") - - assert.NotNil(t, err) -} - func TestOctopusNoPatternGiven(t *testing.T) { context, out := CreateTestContext() defer test.Cleanup(context.Repo) - Merge(context) + Merge(context, nil) assert.Equal(t, "Nothing to merge. 
No pattern given\n", out.String()) } @@ -128,7 +117,7 @@ func TestOctopusNoBranchMatching(t *testing.T) { context, out := CreateTestContext() defer test.Cleanup(context.Repo) - Merge(context, "refs/remotes/dumb/*", "refs/remotes/dumber/*") + Merge(context, []string{"refs/remotes/dumb/*", "refs/remotes/dumber/*"}) assert.Contains(t, out.String(), "No branch matching \"refs/remotes/dumb/* refs/remotes/dumber/*\" were found\n") } @@ -150,7 +139,7 @@ func TestOctopusAlreadyUpToDate(t *testing.T) { expected, _ := context.Repo.Git("rev-parse", "HEAD") - err := Merge(context, "outdated_branch") + err := Merge(context, []string{"outdated_branch"}) actual, _ := context.Repo.Git("rev-parse", "HEAD") @@ -171,7 +160,7 @@ func TestUncleanStateFail(t *testing.T) { // create and commit a file writeFile(context.Repo, "foo", "First line") - err := Merge(context, "*") + err := Merge(context, []string{"*"}) if assert.NotNil(t, err) { assert.Contains(t, err.Error(), "The repository has to be clean.") @@ -194,7 +183,7 @@ func TestFastForward(t *testing.T) { expected, _ := repo.Git("rev-parse", "HEAD") - Merge(context, "-n", "new_branch") + Merge(context, []string{"-n", "new_branch"}) actual, _ := repo.Git("rev-parse", "HEAD") assert.Equal(t, expected, actual) @@ -223,7 +212,7 @@ func TestConflictState(t *testing.T) { repo.Git("checkout", "master") expected, _ := repo.Git("rev-parse", "HEAD") - err := Merge(context, "-n", "a_branch") + err := Merge(context, []string{"-n", "a_branch"}) assert.NotNil(t, err) actual, _ := repo.Git("rev-parse", "HEAD") From 921379347e09745bb193b0597203ba72e854754c Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 17:13:37 +0200 Subject: [PATCH 08/16] Reverse and rename DoCommit to NoCommit --- octopus/merge/config.go | 5 ++--- octopus/merge/config_test.go | 4 ++-- octopus/merge/merge.go | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/octopus/merge/config.go b/octopus/merge/config.go index 3f6f7e0..60eecb8 100644 --- a/octopus/merge/config.go +++ b/octopus/merge/config.go @@ -5,7 +5,7 @@ import ( ) type Config struct { - DoCommit bool + NoCommit bool ChunkSize int ExcludedPatterns []string Patterns []string @@ -18,8 +18,7 @@ func GetConfig(args []string) (*Config, error) { var patterns = args return &Config{ - //TODO reverse DoCommit to NoCommit to simplify logic - DoCommit: !check, + NoCommit: check, ChunkSize: chunk, ExcludedPatterns: excludedPatterns, Patterns: patterns, diff --git a/octopus/merge/config_test.go b/octopus/merge/config_test.go index 30c1398..640ee47 100644 --- a/octopus/merge/config_test.go +++ b/octopus/merge/config_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -func TestDoCommit(t *testing.T) { +func TestNoCommit(t *testing.T) { //given args := []string{"a", "b", "c"} @@ -13,7 +13,7 @@ func TestDoCommit(t *testing.T) { cfg, _ := GetConfig(args) //then - assert.Equal(t, cfg.DoCommit, true) + assert.Equal(t, cfg.NoCommit, false) } func TestChunkMode(t *testing.T) { diff --git a/octopus/merge/merge.go b/octopus/merge/merge.go index 811d7dc..991fb26 100644 --- a/octopus/merge/merge.go +++ b/octopus/merge/merge.go @@ -44,7 +44,7 @@ func Merge(context *MergeContext, args []string) error { parents, err := mergeHeads(context, branchList) - if !octopusConfig.DoCommit { + if octopusConfig.NoCommit { context.Repo.Git("reset", "-q", "--hard", initialHeadCommit) } @@ -53,7 +53,7 @@ func Merge(context *MergeContext, args []string) error { } // parents always contains HEAD. 
We need at lease 2 parents to create a merge commit - if octopusConfig.DoCommit && parents != nil && len(parents) > 1 { + if !octopusConfig.NoCommit && parents != nil && len(parents) > 1 { tree, _ := context.Repo.Git("write-tree") args := []string{"commit-tree"} for _, parent := range parents { From e6375761fd0431a84472c1ac1de2143f5da58851 Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 17:39:23 +0200 Subject: [PATCH 09/16] Rename octopusConfig to mergeConfig * conf is only used by merge --- octopus/merge/merge.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/octopus/merge/merge.go b/octopus/merge/merge.go index 991fb26..3584b5f 100644 --- a/octopus/merge/merge.go +++ b/octopus/merge/merge.go @@ -14,13 +14,13 @@ type MergeContext struct { func Merge(context *MergeContext, args []string) error { - octopusConfig, err := GetConfig(args) + mergeConfig, err := GetConfig(args) if err != nil { return err } - if len(octopusConfig.Patterns) == 0 { + if len(mergeConfig.Patterns) == 0 { context.Logger.Println("Nothing to merge. No pattern given") return nil } @@ -32,7 +32,7 @@ func Merge(context *MergeContext, args []string) error { return errors.New("The repository has to be clean.") } - branchList := resolveBranchList(context.Repo, context.Logger, octopusConfig.Patterns, octopusConfig.ExcludedPatterns) + branchList := resolveBranchList(context.Repo, context.Logger, mergeConfig.Patterns, mergeConfig.ExcludedPatterns) if len(branchList) == 0 { return nil @@ -44,7 +44,7 @@ func Merge(context *MergeContext, args []string) error { parents, err := mergeHeads(context, branchList) - if octopusConfig.NoCommit { + if mergeConfig.NoCommit { context.Repo.Git("reset", "-q", "--hard", initialHeadCommit) } @@ -53,7 +53,7 @@ func Merge(context *MergeContext, args []string) error { } // parents always contains HEAD. We need at lease 2 parents to create a merge commit - if !octopusConfig.NoCommit && parents != nil && len(parents) > 1 { + if !mergeConfig.NoCommit && parents != nil && len(parents) > 1 { tree, _ := context.Repo.Git("write-tree") args := []string{"commit-tree"} for _, parent := range parents { From c201492e3997c324a31714f9f1d0fa1dd85a34a1 Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 18:09:59 +0200 Subject: [PATCH 10/16] Pass config to merge.Merge for better testability --- octopus/cmd/merge.go | 7 ++++++- octopus/merge/merge.go | 8 +------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/octopus/cmd/merge.go b/octopus/cmd/merge.go index 0ae908f..23885cf 100644 --- a/octopus/cmd/merge.go +++ b/octopus/cmd/merge.go @@ -46,7 +46,12 @@ func runMergeCmd(cmd *cobra.Command, args []string) { go handleSignals(signalChan, &context) - err := merge.Merge(&context, args) + conf, err := merge.GetConfig(args) + if err != nil { + log.Fatalln(err) + } + + err = merge.Merge(&context, conf) if err != nil { if len(err.Error()) > 0 { diff --git a/octopus/merge/merge.go b/octopus/merge/merge.go index 3584b5f..da9079a 100644 --- a/octopus/merge/merge.go +++ b/octopus/merge/merge.go @@ -12,13 +12,7 @@ type MergeContext struct { Logger *log.Logger } -func Merge(context *MergeContext, args []string) error { - - mergeConfig, err := GetConfig(args) - - if err != nil { - return err - } +func Merge(context *MergeContext, mergeConfig *Config) error { if len(mergeConfig.Patterns) == 0 { context.Logger.Println("Nothing to merge. 
No pattern given") From e195203406c4d5420448d5aba5bfbdd80426ddf5 Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 18:16:35 +0200 Subject: [PATCH 11/16] Fix merge tests * Pass a merge.Config to the tested Merge method --- octopus/merge/merge_test.go | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/octopus/merge/merge_test.go b/octopus/merge/merge_test.go index d37f8c1..35533a9 100644 --- a/octopus/merge/merge_test.go +++ b/octopus/merge/merge_test.go @@ -71,7 +71,10 @@ func TestOctopus3branches(t *testing.T) { // Merge the 3 branches in a new octopus branch repo.Git("checkout", "-b", "octopus", "master") - err := Merge(context, []string{"branch*"}) + config := Config{ + Patterns: []string{"branch*"}, + } + err := Merge(context, &config) assert.Nil(t, err) // The working tree should have the 3 files and status should be clean @@ -108,7 +111,8 @@ func TestOctopusNoPatternGiven(t *testing.T) { context, out := CreateTestContext() defer test.Cleanup(context.Repo) - Merge(context, nil) + config := Config{} + Merge(context, &config) assert.Equal(t, "Nothing to merge. No pattern given\n", out.String()) } @@ -117,7 +121,10 @@ func TestOctopusNoBranchMatching(t *testing.T) { context, out := CreateTestContext() defer test.Cleanup(context.Repo) - Merge(context, []string{"refs/remotes/dumb/*", "refs/remotes/dumber/*"}) + config := Config{ + Patterns: []string{"refs/remotes/dumb/*", "refs/remotes/dumber/*"}, + } + Merge(context, &config) assert.Contains(t, out.String(), "No branch matching \"refs/remotes/dumb/* refs/remotes/dumber/*\" were found\n") } @@ -139,7 +146,10 @@ func TestOctopusAlreadyUpToDate(t *testing.T) { expected, _ := context.Repo.Git("rev-parse", "HEAD") - err := Merge(context, []string{"outdated_branch"}) + config := Config{ + Patterns: []string{"outdated_branch"}, + } + err := Merge(context, &config) actual, _ := context.Repo.Git("rev-parse", "HEAD") @@ -160,7 +170,10 @@ func TestUncleanStateFail(t *testing.T) { // create and commit a file writeFile(context.Repo, "foo", "First line") - err := Merge(context, []string{"*"}) + config := Config{ + Patterns: []string{"*"}, + } + err := Merge(context, &config) if assert.NotNil(t, err) { assert.Contains(t, err.Error(), "The repository has to be clean.") @@ -183,7 +196,11 @@ func TestFastForward(t *testing.T) { expected, _ := repo.Git("rev-parse", "HEAD") - Merge(context, []string{"-n", "new_branch"}) + config := Config{ + NoCommit: true, + Patterns: []string{"new_branch"}, + } + Merge(context, &config) actual, _ := repo.Git("rev-parse", "HEAD") assert.Equal(t, expected, actual) @@ -212,7 +229,11 @@ func TestConflictState(t *testing.T) { repo.Git("checkout", "master") expected, _ := repo.Git("rev-parse", "HEAD") - err := Merge(context, []string{"-n", "a_branch"}) + config := Config{ + NoCommit: true, + Patterns: []string{"a_branch"}, + } + err := Merge(context, &config) assert.NotNil(t, err) actual, _ := repo.Git("rev-parse", "HEAD") From b120b0269981a4d01f66e3a8158297ba06d44e88 Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 18:22:07 +0200 Subject: [PATCH 12/16] Migrate packages git and test to ./octopus/ --- octopus/cmd/merge.go | 2 +- octopus/git/git.go | 51 +++++++++++++++++++++++++++++++++ octopus/git/git_test.go | 53 +++++++++++++++++++++++++++++++++++ octopus/merge/matcher.go | 2 +- octopus/merge/matcher_test.go | 4 +-- octopus/merge/merge.go | 2 +- octopus/merge/merge_test.go | 4 +-- octopus/test/utils.go | 16 +++++++++++ 8 files changed, 127 insertions(+), 7 
deletions(-) create mode 100644 octopus/git/git.go create mode 100644 octopus/git/git_test.go create mode 100644 octopus/test/utils.go diff --git a/octopus/cmd/merge.go b/octopus/cmd/merge.go index 23885cf..ee3bd62 100644 --- a/octopus/cmd/merge.go +++ b/octopus/cmd/merge.go @@ -15,7 +15,7 @@ package cmd import ( - "github.com/lesfurets/git-octopus/git" + "github.com/lesfurets/git-octopus/octopus/git" "github.com/lesfurets/git-octopus/octopus/merge" "github.com/spf13/cobra" "github.com/spf13/viper" diff --git a/octopus/git/git.go b/octopus/git/git.go new file mode 100644 index 0000000..29a5299 --- /dev/null +++ b/octopus/git/git.go @@ -0,0 +1,51 @@ +package git + +import ( + "bufio" + "bytes" + "errors" + "os/exec" + "strings" +) + +type LsRemoteEntry struct { + Ref string + Sha1 string +} + +// Takes the output of git-ls-remote. Returns a map refsname => sha1 +func ParseLsRemote(lsRemoteOutput string) []LsRemoteEntry { + result := []LsRemoteEntry{} + + if len(lsRemoteOutput) == 0 { + return result + } + + scanner := bufio.NewScanner(strings.NewReader(lsRemoteOutput)) + + for scanner.Scan() { + split := strings.Split(scanner.Text(), "\t") + result = append(result, LsRemoteEntry{Ref: split[1], Sha1: split[0]}) + } + + return result +} + +type Repository struct { + Path string +} + +func (repo *Repository) Git(args ...string) (string, error) { + cmd := exec.Command("git", append([]string{"-C", repo.Path}, args...)...) + errOut := bytes.NewBufferString("") + cmd.Stderr = errOut + out, err := cmd.Output() + + if err != nil { + return "", errors.New(errOut.String()) + } + + stringOut := strings.TrimSpace(string(out[:])) + + return stringOut, err +} diff --git a/octopus/git/git_test.go b/octopus/git/git_test.go new file mode 100644 index 0000000..fa990a9 --- /dev/null +++ b/octopus/git/git_test.go @@ -0,0 +1,53 @@ +package git + +import ( + "github.com/stretchr/testify/assert" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestParseLsRemoteEmpty(t *testing.T) { + assert.NotNil(t, ParseLsRemote("")) + assert.Equal(t, []LsRemoteEntry{}, ParseLsRemote("")) +} + +func TestParseLsRemote(t *testing.T) { + lsRemoteOutput := "d8dd4eadaf3c1075eff3b7d4fe6bec5fbfe76b4c refs/heads/master\n" + + "5b2b1bf1cdf1150f34bd5809a038b292dc560998 refs/heads/go_rewrite" + expected := []LsRemoteEntry{ + {Ref: "refs/heads/master", Sha1: "d8dd4eadaf3c1075eff3b7d4fe6bec5fbfe76b4c"}, + {Ref: "refs/heads/go_rewrite", Sha1: "5b2b1bf1cdf1150f34bd5809a038b292dc560998"}, + } + assert.Equal(t, expected, ParseLsRemote(lsRemoteOutput)) +} + +func TestGitCommand(t *testing.T) { + dir, _ := ioutil.TempDir("", "git-octopus-test-") + defer os.RemoveAll(dir) + + repo := Repository{Path: dir} + + repo.Git("init") + + _, err := os.Stat(filepath.Join(dir, ".git")) + + assert.Nil(t, err) +} + +func TestGitError(t *testing.T) { + dir, _ := ioutil.TempDir("", "git-octopus-test-") + defer os.RemoveAll(dir) + + repo := Repository{Path: dir} + + repo.Git("init") + + _, err := repo.Git("rev-parse", "HEAD") + + if assert.NotNil(t, err) { + assert.Contains(t, err.Error(), + "ambiguous argument 'HEAD': unknown revision or path not in the working tree.") + } +} diff --git a/octopus/merge/matcher.go b/octopus/merge/matcher.go index 7c9e4eb..4841137 100644 --- a/octopus/merge/matcher.go +++ b/octopus/merge/matcher.go @@ -3,7 +3,7 @@ package merge import ( "bytes" "fmt" - "github.com/lesfurets/git-octopus/git" + "github.com/lesfurets/git-octopus/octopus/git" "log" "strings" ) diff --git a/octopus/merge/matcher_test.go 
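
A short usage sketch (assuming a git binary on PATH and a configured remote named origin) for the two pieces added above: Repository.Git shells out to git -C <path>, and ParseLsRemote turns the tab-separated ls-remote output into LsRemoteEntry values.

package main

import (
	"fmt"
	"log"

	"github.com/lesfurets/git-octopus/octopus/git"
)

func main() {
	repo := git.Repository{Path: "."}

	// Each output line looks like "<sha1>\t<ref>".
	out, err := repo.Git("ls-remote", "--heads", "origin")
	if err != nil {
		log.Fatalln(err)
	}

	for _, entry := range git.ParseLsRemote(out) {
		fmt.Printf("%s -> %s\n", entry.Ref, entry.Sha1)
	}
}
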
b/octopus/merge/matcher_test.go index 900f8a4..051e92e 100644 --- a/octopus/merge/matcher_test.go +++ b/octopus/merge/matcher_test.go @@ -2,8 +2,8 @@ package merge import ( "bytes" - "github.com/lesfurets/git-octopus/git" - "github.com/lesfurets/git-octopus/test" + "github.com/lesfurets/git-octopus/octopus/git" + "github.com/lesfurets/git-octopus/octopus/test" "github.com/stretchr/testify/assert" "testing" ) diff --git a/octopus/merge/merge.go b/octopus/merge/merge.go index da9079a..714a6e8 100644 --- a/octopus/merge/merge.go +++ b/octopus/merge/merge.go @@ -3,7 +3,7 @@ package merge import ( "bytes" "errors" - "github.com/lesfurets/git-octopus/git" + "github.com/lesfurets/git-octopus/octopus/git" "log" ) diff --git a/octopus/merge/merge_test.go b/octopus/merge/merge_test.go index 35533a9..1f25f44 100644 --- a/octopus/merge/merge_test.go +++ b/octopus/merge/merge_test.go @@ -3,8 +3,8 @@ package merge import ( "bytes" "fmt" - "github.com/lesfurets/git-octopus/git" - "github.com/lesfurets/git-octopus/test" + "github.com/lesfurets/git-octopus/octopus/git" + "github.com/lesfurets/git-octopus/octopus/test" "github.com/stretchr/testify/assert" "io/ioutil" "log" diff --git a/octopus/test/utils.go b/octopus/test/utils.go new file mode 100644 index 0000000..f08aa16 --- /dev/null +++ b/octopus/test/utils.go @@ -0,0 +1,16 @@ +package test + +import ( + "github.com/lesfurets/git-octopus/octopus/git" + "io/ioutil" + "os" +) + +func CreateTempDir() string { + dir, _ := ioutil.TempDir("", "git-octopus-test-") + return dir +} + +func Cleanup(repo *git.Repository) error { + return os.RemoveAll(repo.Path) +} From 72b12918fc55b6bcc6fbeb41c035d7be81deda08 Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 18:29:23 +0200 Subject: [PATCH 13/16] Revert all changes made to the git-octopus cli * Should fix old tests/compile --- config/config.go | 5 ++++- run/run.go | 9 ++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index 75efd42..1bade56 100644 --- a/config/config.go +++ b/config/config.go @@ -9,6 +9,7 @@ import ( ) type OctopusConfig struct { + PrintVersion bool DoCommit bool ChunkSize int ExcludedPatterns []string @@ -28,11 +29,12 @@ func (e *excluded_patterns) Set(value string) error { func GetOctopusConfig(repo *git.Repository, args []string) (*OctopusConfig, error) { - var noCommitArg, commitArg bool + var printVersion, noCommitArg, commitArg bool var chunkSizeArg int var excludedPatternsArg excluded_patterns var commandLine = flag.NewFlagSet("git-octopus", flag.ExitOnError) + commandLine.BoolVar(&printVersion, "v", false, "prints the version of git-octopus.") commandLine.BoolVar(&noCommitArg, "n", false, "leaves the repository back to HEAD.") commandLine.BoolVar(&commitArg, "c", false, "Commit the resulting merge in the current branch.") commandLine.IntVar(&chunkSizeArg, "s", 0, "do the octopus by chunk of n branches.") @@ -86,6 +88,7 @@ func GetOctopusConfig(repo *git.Repository, args []string) (*OctopusConfig, erro } return &OctopusConfig{ + PrintVersion: printVersion, DoCommit: configCommit, ChunkSize: chunkSizeArg, ExcludedPatterns: excludedPatterns, diff --git a/run/run.go b/run/run.go index 6fa22ea..f13ae47 100644 --- a/run/run.go +++ b/run/run.go @@ -13,6 +13,8 @@ type OctopusContext struct { Logger *log.Logger } +const VERSION = "2.0" + func Run(context *OctopusContext, args ...string) error { octopusConfig, err := config.GetOctopusConfig(context.Repo, args) @@ -21,6 +23,11 @@ func Run(context *OctopusContext, args ...string) error { 
return err } + if octopusConfig.PrintVersion { + context.Logger.Println(VERSION) + return nil + } + if len(octopusConfig.Patterns) == 0 { context.Logger.Println("Nothing to merge. No pattern given") return nil @@ -148,6 +155,6 @@ func octopusCommitMessage(remotes []git.LsRemoteEntry) string { for _, lsRemoteEntry := range remotes { buf.WriteString(lsRemoteEntry.Ref + "\n") } - buf.WriteString("\nCommit created by git-octopus.\n") + buf.WriteString("\nCommit created by git-octopus " + VERSION + ".\n") return buf.String() } From 51da3e4664c47bed7ee3800267baee9cd626deed Mon Sep 17 00:00:00 2001 From: hvi Date: Sat, 8 Jul 2017 18:41:46 +0200 Subject: [PATCH 14/16] Add vendored dependencies to cobra, viper and testify * Via `gvt fetch` --- .../github.com/armon/consul-api/LICENSE | 362 + .../vendor/github.com/armon/consul-api/acl.go | 140 + .../github.com/armon/consul-api/agent.go | 272 + .../vendor/github.com/armon/consul-api/api.go | 323 + .../github.com/armon/consul-api/catalog.go | 181 + .../github.com/armon/consul-api/event.go | 104 + .../github.com/armon/consul-api/health.go | 136 + .../vendor/github.com/armon/consul-api/kv.go | 219 + .../github.com/armon/consul-api/session.go | 204 + .../github.com/armon/consul-api/status.go | 43 + .../github.com/coreos/etcd/client/LICENSE | 202 + .../coreos/etcd/client/auth_role.go | 237 + .../coreos/etcd/client/auth_user.go | 320 + .../coreos/etcd/client/cancelreq.go | 18 + .../github.com/coreos/etcd/client/client.go | 703 + .../coreos/etcd/client/cluster_error.go | 37 + .../github.com/coreos/etcd/client/curl.go | 70 + .../github.com/coreos/etcd/client/discover.go | 40 + .../github.com/coreos/etcd/client/doc.go | 73 + .../coreos/etcd/client/integration/doc.go | 17 + .../coreos/etcd/client/keys.generated.go | 1087 + .../github.com/coreos/etcd/client/keys.go | 682 + .../github.com/coreos/etcd/client/members.go | 304 + .../github.com/coreos/etcd/client/util.go | 53 + .../coreos/etcd/pkg/pathutil/LICENSE | 202 + .../coreos/etcd/pkg/pathutil/path.go | 31 + .../github.com/coreos/etcd/pkg/srv/LICENSE | 202 + .../github.com/coreos/etcd/pkg/srv/srv.go | 140 + .../github.com/coreos/etcd/pkg/types/LICENSE | 202 + .../github.com/coreos/etcd/pkg/types/doc.go | 17 + .../github.com/coreos/etcd/pkg/types/id.go | 41 + .../github.com/coreos/etcd/pkg/types/set.go | 178 + .../github.com/coreos/etcd/pkg/types/slice.go | 22 + .../github.com/coreos/etcd/pkg/types/urls.go | 82 + .../coreos/etcd/pkg/types/urlsmap.go | 107 + .../github.com/coreos/etcd/version/LICENSE | 202 + .../github.com/coreos/etcd/version/version.go | 56 + .../coreos/go-semver/semver/LICENSE | 202 + .../coreos/go-semver/semver/semver.go | 296 + .../coreos/go-semver/semver/sort.go | 38 + .../cpuguy83/go-md2man/md2man/LICENSE.md | 21 + .../cpuguy83/go-md2man/md2man/md2man.go | 19 + .../cpuguy83/go-md2man/md2man/roff.go | 301 + .../russross/blackfriday/LICENSE.md | 21 + .../github.com/russross/blackfriday/block.go | 1424 + .../github.com/russross/blackfriday/html.go | 949 + .../github.com/russross/blackfriday/inline.go | 1148 + .../github.com/russross/blackfriday/latex.go | 332 + .../russross/blackfriday/markdown.go | 926 + .../russross/blackfriday/smartypants.go | 400 + .../shurcooL/sanitized_anchor_name/LICENSE.md | 21 + .../shurcooL/sanitized_anchor_name/main.go | 29 + .../github.com/fsnotify/fsnotify/LICENSE | 28 + .../github.com/fsnotify/fsnotify/fen.go | 37 + .../github.com/fsnotify/fsnotify/fsnotify.go | 66 + .../github.com/fsnotify/fsnotify/inotify.go | 337 + .../fsnotify/fsnotify/inotify_poller.go | 
187 + .../github.com/fsnotify/fsnotify/kqueue.go | 503 + .../fsnotify/fsnotify/open_mode_bsd.go | 11 + .../fsnotify/fsnotify/open_mode_darwin.go | 12 + .../github.com/fsnotify/fsnotify/windows.go | 561 + .../vendor/github.com/hashicorp/hcl/LICENSE | 354 + .../github.com/hashicorp/hcl/decoder.go | 724 + .../vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 + .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/fmtcmd/fmtcmd.go | 162 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 520 + .../hashicorp/hcl/hcl/printer/nodes.go | 779 + .../hashicorp/hcl/hcl/printer/printer.go | 66 + .../hashicorp/hcl/hcl/scanner/scanner.go | 651 + .../hashicorp/hcl/hcl/strconv/quote.go | 241 + .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 219 + .../hashicorp/hcl/json/parser/flatten.go | 117 + .../hashicorp/hcl/json/parser/parser.go | 313 + .../hashicorp/hcl/json/scanner/scanner.go | 451 + .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + .../vendor/github.com/hashicorp/hcl/lex.go | 38 + .../vendor/github.com/hashicorp/hcl/parse.go | 39 + .../hashicorp/hcl/testhelper/unix2dos.go | 15 + .../inconshreveable/mousetrap/LICENSE | 13 + .../inconshreveable/mousetrap/trap_others.go | 15 + .../inconshreveable/mousetrap/trap_windows.go | 98 + .../mousetrap/trap_windows_1.4.go | 46 + octopus/vendor/github.com/kr/fs/LICENSE | 27 + octopus/vendor/github.com/kr/fs/filesystem.go | 36 + octopus/vendor/github.com/kr/fs/walk.go | 95 + .../github.com/magiconair/properties/LICENSE | 25 + .../magiconair/properties/assert/assert.go | 90 + .../magiconair/properties/decode.go | 289 + .../github.com/magiconair/properties/doc.go | 156 + .../magiconair/properties/integrate.go | 34 + .../github.com/magiconair/properties/lex.go | 408 + .../github.com/magiconair/properties/load.go | 241 + .../magiconair/properties/parser.go | 95 + .../magiconair/properties/properties.go | 808 + .../magiconair/properties/rangecheck.go | 31 + .../github.com/mitchellh/go-homedir/LICENSE | 21 + .../mitchellh/go-homedir/homedir.go | 137 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/decode_hooks.go | 152 + .../mitchellh/mapstructure/error.go | 50 + .../mitchellh/mapstructure/mapstructure.go | 828 + .../github.com/pelletier/go-toml/LICENSE | 21 + .../pelletier/go-toml/cmd/test_program.go | 91 + .../pelletier/go-toml/cmd/tomljson/main.go | 72 + .../pelletier/go-toml/cmd/tomll/main.go | 66 + .../github.com/pelletier/go-toml/doc.go | 23 + .../pelletier/go-toml/keysparsing.go | 94 + .../github.com/pelletier/go-toml/lexer.go | 651 + .../github.com/pelletier/go-toml/marshal.go | 489 + .../github.com/pelletier/go-toml/parser.go | 383 + .../github.com/pelletier/go-toml/position.go | 29 + .../github.com/pelletier/go-toml/query/doc.go | 175 + .../pelletier/go-toml/query/lexer.go | 357 + .../pelletier/go-toml/query/match.go | 232 + .../pelletier/go-toml/query/parser.go | 275 + .../pelletier/go-toml/query/query.go | 158 + .../pelletier/go-toml/query/tokens.go | 106 + .../github.com/pelletier/go-toml/token.go | 140 + .../github.com/pelletier/go-toml/toml.go | 292 + .../pelletier/go-toml/tomltree_create.go | 142 + .../pelletier/go-toml/tomltree_write.go | 233 + octopus/vendor/github.com/pkg/errors/LICENSE | 23 + .../vendor/github.com/pkg/errors/errors.go | 269 + octopus/vendor/github.com/pkg/errors/stack.go | 186 + 
octopus/vendor/github.com/pkg/sftp/LICENSE | 9 + octopus/vendor/github.com/pkg/sftp/attrs.go | 237 + .../vendor/github.com/pkg/sftp/attrs_stubs.go | 11 + .../vendor/github.com/pkg/sftp/attrs_unix.go | 17 + octopus/vendor/github.com/pkg/sftp/client.go | 1131 + octopus/vendor/github.com/pkg/sftp/conn.go | 133 + octopus/vendor/github.com/pkg/sftp/debug.go | 9 + .../examples/buffered-read-benchmark/main.go | 78 + .../examples/buffered-write-benchmark/main.go | 84 + .../pkg/sftp/examples/request-server/main.go | 131 + .../pkg/sftp/examples/sftp-server/main.go | 147 + .../examples/streaming-read-benchmark/main.go | 85 + .../streaming-write-benchmark/main.go | 85 + octopus/vendor/github.com/pkg/sftp/match.go | 345 + .../github.com/pkg/sftp/packet-manager.go | 156 + .../pkg/sftp/packet-manager_go1.8.go | 21 + .../pkg/sftp/packet-manager_legacy.go | 21 + .../github.com/pkg/sftp/packet-typing.go | 141 + octopus/vendor/github.com/pkg/sftp/packet.go | 898 + octopus/vendor/github.com/pkg/sftp/release.go | 5 + .../github.com/pkg/sftp/request-example.go | 220 + .../github.com/pkg/sftp/request-interfaces.go | 30 + .../github.com/pkg/sftp/request-server.go | 231 + .../github.com/pkg/sftp/request-unix.go | 23 + octopus/vendor/github.com/pkg/sftp/request.go | 303 + .../github.com/pkg/sftp/request_windows.go | 11 + octopus/vendor/github.com/pkg/sftp/server.go | 575 + .../pkg/sftp/server_standalone/main.go | 52 + .../pkg/sftp/server_statvfs_darwin.go | 21 + .../pkg/sftp/server_statvfs_impl.go | 25 + .../pkg/sftp/server_statvfs_linux.go | 22 + .../pkg/sftp/server_statvfs_stubs.go | 11 + .../github.com/pkg/sftp/server_stubs.go | 12 + .../vendor/github.com/pkg/sftp/server_unix.go | 143 + octopus/vendor/github.com/pkg/sftp/sftp.go | 217 + .../vendor/github.com/spf13/afero/LICENSE.txt | 174 + .../vendor/github.com/spf13/afero/afero.go | 108 + .../vendor/github.com/spf13/afero/basepath.go | 145 + .../github.com/spf13/afero/cacheOnReadFs.go | 295 + .../github.com/spf13/afero/const_bsds.go | 22 + .../github.com/spf13/afero/const_win_unix.go | 25 + .../github.com/spf13/afero/copyOnWriteFs.go | 253 + .../vendor/github.com/spf13/afero/httpFs.go | 110 + .../vendor/github.com/spf13/afero/ioutil.go | 230 + .../vendor/github.com/spf13/afero/mem/dir.go | 37 + .../github.com/spf13/afero/mem/dirmap.go | 43 + .../vendor/github.com/spf13/afero/mem/file.go | 285 + .../vendor/github.com/spf13/afero/memmap.go | 361 + .../vendor/github.com/spf13/afero/memradix.go | 14 + octopus/vendor/github.com/spf13/afero/os.go | 94 + octopus/vendor/github.com/spf13/afero/path.go | 108 + .../github.com/spf13/afero/readonlyfs.go | 70 + .../vendor/github.com/spf13/afero/regexpfs.go | 214 + .../github.com/spf13/afero/sftpfs/file.go | 95 + .../github.com/spf13/afero/sftpfs/sftp.go | 129 + .../github.com/spf13/afero/unionFile.go | 274 + octopus/vendor/github.com/spf13/afero/util.go | 331 + octopus/vendor/github.com/spf13/cast/LICENSE | 21 + octopus/vendor/github.com/spf13/cast/cast.go | 159 + octopus/vendor/github.com/spf13/cast/caste.go | 1146 + .../vendor/github.com/spf13/cobra/LICENSE.txt | 174 + .../spf13/cobra/bash_completions.go | 537 + .../vendor/github.com/spf13/cobra/cobra.go | 181 + .../github.com/spf13/cobra/cobra/cmd/add.go | 179 + .../spf13/cobra/cobra/cmd/helpers.go | 140 + .../github.com/spf13/cobra/cobra/cmd/init.go | 234 + .../spf13/cobra/cobra/cmd/license_agpl.go | 684 + .../spf13/cobra/cobra/cmd/license_apache_2.go | 237 + .../cobra/cobra/cmd/license_bsd_clause_2.go | 72 + .../cobra/cobra/cmd/license_bsd_clause_3.go | 79 + 
.../spf13/cobra/cobra/cmd/license_gpl_2.go | 377 + .../spf13/cobra/cobra/cmd/license_gpl_3.go | 712 + .../spf13/cobra/cobra/cmd/license_lgpl.go | 187 + .../spf13/cobra/cobra/cmd/license_mit.go | 63 + .../spf13/cobra/cobra/cmd/licenses.go | 114 + .../spf13/cobra/cobra/cmd/project.go | 195 + .../github.com/spf13/cobra/cobra/cmd/root.go | 79 + .../github.com/spf13/cobra/cobra/main.go | 20 + .../vendor/github.com/spf13/cobra/command.go | 1296 + .../github.com/spf13/cobra/command_notwin.go | 5 + .../github.com/spf13/cobra/command_win.go | 26 + .../github.com/spf13/cobra/doc/man_docs.go | 236 + .../github.com/spf13/cobra/doc/md_docs.go | 159 + .../vendor/github.com/spf13/cobra/doc/util.go | 51 + .../github.com/spf13/cobra/doc/yaml_docs.go | 169 + .../spf13/jwalterweatherman/LICENSE | 21 + .../jwalterweatherman/default_notepad.go | 113 + .../spf13/jwalterweatherman/log_counter.go | 55 + .../spf13/jwalterweatherman/notepad.go | 194 + octopus/vendor/github.com/spf13/pflag/LICENSE | 28 + octopus/vendor/github.com/spf13/pflag/bool.go | 94 + .../github.com/spf13/pflag/bool_slice.go | 147 + .../vendor/github.com/spf13/pflag/count.go | 96 + .../vendor/github.com/spf13/pflag/duration.go | 86 + octopus/vendor/github.com/spf13/pflag/flag.go | 1128 + .../vendor/github.com/spf13/pflag/float32.go | 88 + .../vendor/github.com/spf13/pflag/float64.go | 84 + .../github.com/spf13/pflag/golangflag.go | 101 + octopus/vendor/github.com/spf13/pflag/int.go | 84 + .../vendor/github.com/spf13/pflag/int32.go | 88 + .../vendor/github.com/spf13/pflag/int64.go | 84 + octopus/vendor/github.com/spf13/pflag/int8.go | 88 + .../github.com/spf13/pflag/int_slice.go | 128 + octopus/vendor/github.com/spf13/pflag/ip.go | 94 + .../vendor/github.com/spf13/pflag/ip_slice.go | 148 + .../vendor/github.com/spf13/pflag/ipmask.go | 122 + .../vendor/github.com/spf13/pflag/ipnet.go | 98 + .../vendor/github.com/spf13/pflag/string.go | 80 + .../github.com/spf13/pflag/string_array.go | 103 + .../github.com/spf13/pflag/string_slice.go | 129 + octopus/vendor/github.com/spf13/pflag/uint.go | 88 + .../vendor/github.com/spf13/pflag/uint16.go | 88 + .../vendor/github.com/spf13/pflag/uint32.go | 88 + .../vendor/github.com/spf13/pflag/uint64.go | 88 + .../vendor/github.com/spf13/pflag/uint8.go | 88 + .../github.com/spf13/pflag/uint_slice.go | 126 + octopus/vendor/github.com/spf13/viper/LICENSE | 21 + .../vendor/github.com/spf13/viper/flags.go | 57 + .../github.com/spf13/viper/remote/remote.go | 107 + octopus/vendor/github.com/spf13/viper/util.go | 282 + .../vendor/github.com/spf13/viper/viper.go | 1573 + .../github.com/stretchr/testify/LICENCE.txt | 22 + .../github.com/stretchr/testify/LICENSE | 22 + .../testify/assert/assertion_format.go | 379 + .../testify/assert/assertion_forward.go | 746 + .../stretchr/testify/assert/assertions.go | 1208 + .../github.com/stretchr/testify/assert/doc.go | 45 + .../stretchr/testify/assert/errors.go | 10 + .../testify/assert/forward_assertions.go | 16 + .../testify/assert/http_assertions.go | 127 + .../vendor/github.com/stretchr/testify/doc.go | 22 + .../github.com/stretchr/testify/http/doc.go | 2 + .../testify/http/test_response_writer.go | 49 + .../testify/http/test_round_tripper.go | 17 + .../github.com/stretchr/testify/mock/doc.go | 44 + .../github.com/stretchr/testify/mock/mock.go | 776 + .../stretchr/testify/require/doc.go | 28 + .../testify/require/forward_requirements.go | 16 + .../stretchr/testify/require/require.go | 911 + .../testify/require/require_forward.go | 747 + 
.../stretchr/testify/require/requirements.go | 9 + .../github.com/stretchr/testify/suite/doc.go | 65 + .../stretchr/testify/suite/interfaces.go | 46 + .../stretchr/testify/suite/suite.go | 136 + .../github.com/davecgh/go-spew/spew/bypass.go | 152 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../github.com/davecgh/go-spew/spew/config.go | 306 + .../github.com/davecgh/go-spew/spew/doc.go | 202 + .../github.com/davecgh/go-spew/spew/dump.go | 509 + .../github.com/davecgh/go-spew/spew/format.go | 419 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + .../pmezard/go-difflib/difflib/difflib.go | 758 + .../github.com/stretchr/objx/accessors.go | 179 + .../github.com/stretchr/objx/constants.go | 13 + .../github.com/stretchr/objx/conversions.go | 117 + .../vendor/github.com/stretchr/objx/doc.go | 72 + .../vendor/github.com/stretchr/objx/map.go | 222 + .../github.com/stretchr/objx/mutations.go | 81 + .../github.com/stretchr/objx/security.go | 14 + .../vendor/github.com/stretchr/objx/tests.go | 17 + .../stretchr/objx/type_specific_codegen.go | 2881 ++ .../vendor/github.com/stretchr/objx/value.go | 13 + .../vendor/github.com/ugorji/go/codec/0doc.go | 199 + .../vendor/github.com/ugorji/go/codec/LICENSE | 22 + .../vendor/github.com/ugorji/go/codec/binc.go | 938 + .../vendor/github.com/ugorji/go/codec/cbor.go | 601 + .../ugorji/go/codec/codecgen/gen.go | 324 + .../github.com/ugorji/go/codec/codecgen/z.go | 3 + .../github.com/ugorji/go/codec/decode.go | 2076 + .../github.com/ugorji/go/codec/decode_go.go | 16 + .../github.com/ugorji/go/codec/decode_go14.go | 14 + .../github.com/ugorji/go/codec/encode.go | 1462 + .../ugorji/go/codec/fast-path.generated.go | 39352 ++++++++++++++++ .../ugorji/go/codec/fast-path.not.go | 34 + .../ugorji/go/codec/gen-helper.generated.go | 243 + .../ugorji/go/codec/gen.generated.go | 175 + .../vendor/github.com/ugorji/go/codec/gen.go | 2020 + .../github.com/ugorji/go/codec/gen_15.go | 12 + .../github.com/ugorji/go/codec/gen_16.go | 12 + .../github.com/ugorji/go/codec/gen_17.go | 10 + .../github.com/ugorji/go/codec/helper.go | 1314 + .../ugorji/go/codec/helper_internal.go | 242 + .../ugorji/go/codec/helper_not_unsafe.go | 36 + .../ugorji/go/codec/helper_unsafe.go | 53 + .../vendor/github.com/ugorji/go/codec/json.go | 1247 + .../github.com/ugorji/go/codec/msgpack.go | 861 + .../vendor/github.com/ugorji/go/codec/noop.go | 213 + .../github.com/ugorji/go/codec/prebuild.go | 3 + .../vendor/github.com/ugorji/go/codec/rpc.go | 180 + .../github.com/ugorji/go/codec/simple.go | 535 + .../vendor/github.com/ugorji/go/codec/time.go | 233 + .../xordataexchange/crypt/backend/LICENSE | 9 + .../xordataexchange/crypt/backend/backend.go | 32 + .../crypt/backend/consul/consul.go | 87 + .../crypt/backend/etcd/etcd.go | 116 + .../crypt/backend/mock/mock.go | 61 + .../xordataexchange/crypt/config/LICENSE | 9 + .../xordataexchange/crypt/config/config.go | 201 + .../crypt/encoding/secconf/LICENSE | 9 + .../crypt/encoding/secconf/secconf.go | 68 + .../vendor/golang.org/x/crypto/cast5/LICENSE | 27 + .../vendor/golang.org/x/crypto/cast5/cast5.go | 526 + .../golang.org/x/crypto/curve25519/LICENSE | 27 + .../x/crypto/curve25519/const_amd64.h | 8 + .../x/crypto/curve25519/const_amd64.s | 20 + .../x/crypto/curve25519/cswap_amd64.s | 65 + .../x/crypto/curve25519/curve25519.go | 834 + .../golang.org/x/crypto/curve25519/doc.go | 23 + .../x/crypto/curve25519/freeze_amd64.s | 73 + .../x/crypto/curve25519/ladderstep_amd64.s | 1377 + 
.../x/crypto/curve25519/mont25519_amd64.go | 240 + .../x/crypto/curve25519/mul_amd64.s | 169 + .../x/crypto/curve25519/square_amd64.s | 132 + .../golang.org/x/crypto/ed25519/LICENSE | 27 + .../golang.org/x/crypto/ed25519/ed25519.go | 181 + .../ed25519/internal/edwards25519/const.go | 1422 + .../internal/edwards25519/edwards25519.go | 1771 + .../golang.org/x/crypto/openpgp/LICENSE | 27 + .../x/crypto/openpgp/armor/armor.go | 219 + .../x/crypto/openpgp/armor/encode.go | 160 + .../x/crypto/openpgp/canonical_text.go | 59 + .../x/crypto/openpgp/clearsign/clearsign.go | 376 + .../x/crypto/openpgp/elgamal/elgamal.go | 122 + .../x/crypto/openpgp/errors/errors.go | 72 + .../golang.org/x/crypto/openpgp/keys.go | 637 + .../x/crypto/openpgp/packet/compressed.go | 123 + .../x/crypto/openpgp/packet/config.go | 91 + .../x/crypto/openpgp/packet/encrypted_key.go | 199 + .../x/crypto/openpgp/packet/literal.go | 89 + .../x/crypto/openpgp/packet/ocfb.go | 143 + .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 162 + .../x/crypto/openpgp/packet/packet.go | 537 + .../x/crypto/openpgp/packet/private_key.go | 380 + .../x/crypto/openpgp/packet/public_key.go | 748 + .../x/crypto/openpgp/packet/public_key_v3.go | 279 + .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 + .../x/crypto/openpgp/packet/signature_v3.go | 146 + .../openpgp/packet/symmetric_key_encrypted.go | 155 + .../openpgp/packet/symmetrically_encrypted.go | 290 + .../x/crypto/openpgp/packet/userattribute.go | 91 + .../x/crypto/openpgp/packet/userid.go | 160 + .../golang.org/x/crypto/openpgp/read.go | 442 + .../golang.org/x/crypto/openpgp/s2k/s2k.go | 273 + .../golang.org/x/crypto/openpgp/write.go | 378 + .../vendor/golang.org/x/crypto/ssh/LICENSE | 27 + .../golang.org/x/crypto/ssh/agent/client.go | 659 + .../golang.org/x/crypto/ssh/agent/forward.go | 103 + .../golang.org/x/crypto/ssh/agent/keyring.go | 215 + .../golang.org/x/crypto/ssh/agent/server.go | 451 + .../vendor/golang.org/x/crypto/ssh/buffer.go | 98 + .../vendor/golang.org/x/crypto/ssh/certs.go | 519 + .../vendor/golang.org/x/crypto/ssh/channel.go | 633 + .../vendor/golang.org/x/crypto/ssh/cipher.go | 629 + .../vendor/golang.org/x/crypto/ssh/client.go | 257 + .../golang.org/x/crypto/ssh/client_auth.go | 486 + .../vendor/golang.org/x/crypto/ssh/common.go | 373 + .../golang.org/x/crypto/ssh/connection.go | 143 + octopus/vendor/golang.org/x/crypto/ssh/doc.go | 21 + .../golang.org/x/crypto/ssh/handshake.go | 640 + octopus/vendor/golang.org/x/crypto/ssh/kex.go | 540 + .../vendor/golang.org/x/crypto/ssh/keys.go | 1006 + .../x/crypto/ssh/knownhosts/knownhosts.go | 546 + octopus/vendor/golang.org/x/crypto/ssh/mac.go | 61 + .../golang.org/x/crypto/ssh/messages.go | 758 + octopus/vendor/golang.org/x/crypto/ssh/mux.go | 330 + .../vendor/golang.org/x/crypto/ssh/server.go | 563 + .../vendor/golang.org/x/crypto/ssh/session.go | 627 + .../golang.org/x/crypto/ssh/streamlocal.go | 115 + .../vendor/golang.org/x/crypto/ssh/tcpip.go | 465 + .../x/crypto/ssh/terminal/terminal.go | 951 + .../golang.org/x/crypto/ssh/terminal/util.go | 119 + .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 11 + .../x/crypto/ssh/terminal/util_plan9.go | 58 + .../x/crypto/ssh/terminal/util_solaris.go | 128 + .../x/crypto/ssh/terminal/util_windows.go | 155 + .../golang.org/x/crypto/ssh/test/doc.go | 7 + .../golang.org/x/crypto/ssh/transport.go | 375 + .../vendor/golang.org/x/net/context/LICENSE | 27 + 
.../golang.org/x/net/context/context.go | 156 + .../x/net/context/ctxhttp/ctxhttp.go | 74 + .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 + .../vendor/golang.org/x/net/context/go17.go | 72 + .../golang.org/x/net/context/pre_go17.go | 300 + octopus/vendor/golang.org/x/sys/unix/LICENSE | 27 + .../golang.org/x/sys/unix/asm_darwin_386.s | 29 + .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 + .../golang.org/x/sys/unix/asm_darwin_arm.s | 30 + .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 + .../x/sys/unix/asm_dragonfly_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_linux_386.s | 35 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 29 + .../golang.org/x/sys/unix/asm_linux_arm.s | 29 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 24 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 28 + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 31 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 28 + .../golang.org/x/sys/unix/asm_linux_s390x.s | 28 + .../golang.org/x/sys/unix/asm_netbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 + .../golang.org/x/sys/unix/bluetooth_linux.go | 35 + .../vendor/golang.org/x/sys/unix/constants.go | 13 + .../vendor/golang.org/x/sys/unix/dirent.go | 102 + .../golang.org/x/sys/unix/endian_big.go | 9 + .../golang.org/x/sys/unix/endian_little.go | 9 + .../vendor/golang.org/x/sys/unix/env_unix.go | 27 + .../vendor/golang.org/x/sys/unix/env_unset.go | 14 + octopus/vendor/golang.org/x/sys/unix/flock.go | 24 + .../x/sys/unix/flock_linux_32bit.go | 13 + octopus/vendor/golang.org/x/sys/unix/gccgo.go | 46 + .../vendor/golang.org/x/sys/unix/gccgo_c.c | 41 + .../x/sys/unix/gccgo_linux_amd64.go | 20 + .../x/sys/unix/gccgo_linux_sparc64.go | 20 + .../golang.org/x/sys/unix/linux/mkall.go | 379 + .../golang.org/x/sys/unix/linux/types.go | 545 + .../vendor/golang.org/x/sys/unix/mkpost.go | 88 + .../golang.org/x/sys/unix/openbsd_pledge.go | 38 + octopus/vendor/golang.org/x/sys/unix/race.go | 30 + octopus/vendor/golang.org/x/sys/unix/race0.go | 25 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 + .../golang.org/x/sys/unix/sockcmsg_unix.go | 104 + octopus/vendor/golang.org/x/sys/unix/str.go | 26 + .../vendor/golang.org/x/sys/unix/syscall.go | 69 + .../golang.org/x/sys/unix/syscall_bsd.go | 614 + .../golang.org/x/sys/unix/syscall_darwin.go | 493 + .../x/sys/unix/syscall_darwin_386.go | 77 + .../x/sys/unix/syscall_darwin_amd64.go | 79 + .../x/sys/unix/syscall_darwin_arm.go | 71 + .../x/sys/unix/syscall_darwin_arm64.go | 77 + .../x/sys/unix/syscall_dragonfly.go | 425 + .../x/sys/unix/syscall_dragonfly_amd64.go | 61 + .../golang.org/x/sys/unix/syscall_freebsd.go | 666 + .../x/sys/unix/syscall_freebsd_386.go | 61 + .../x/sys/unix/syscall_freebsd_amd64.go | 61 + .../x/sys/unix/syscall_freebsd_arm.go | 61 + .../golang.org/x/sys/unix/syscall_linux.go | 1445 + .../x/sys/unix/syscall_linux_386.go | 399 + .../x/sys/unix/syscall_linux_amd64.go | 152 + .../x/sys/unix/syscall_linux_amd64_gc.go | 13 + .../x/sys/unix/syscall_linux_arm.go | 263 + .../x/sys/unix/syscall_linux_arm64.go | 190 + .../x/sys/unix/syscall_linux_mips64x.go | 209 + .../x/sys/unix/syscall_linux_mipsx.go | 239 + .../x/sys/unix/syscall_linux_ppc64x.go | 135 + 
.../x/sys/unix/syscall_linux_s390x.go | 328 + .../x/sys/unix/syscall_linux_sparc64.go | 169 + .../golang.org/x/sys/unix/syscall_netbsd.go | 476 + .../x/sys/unix/syscall_netbsd_386.go | 42 + .../x/sys/unix/syscall_netbsd_amd64.go | 42 + .../x/sys/unix/syscall_netbsd_arm.go | 42 + .../golang.org/x/sys/unix/syscall_no_getwd.go | 11 + .../golang.org/x/sys/unix/syscall_openbsd.go | 287 + .../x/sys/unix/syscall_openbsd_386.go | 42 + .../x/sys/unix/syscall_openbsd_amd64.go | 42 + .../golang.org/x/sys/unix/syscall_solaris.go | 715 + .../x/sys/unix/syscall_solaris_amd64.go | 35 + .../golang.org/x/sys/unix/syscall_unix.go | 293 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 + .../golang.org/x/sys/unix/types_darwin.go | 250 + .../golang.org/x/sys/unix/types_dragonfly.go | 242 + .../golang.org/x/sys/unix/types_freebsd.go | 353 + .../golang.org/x/sys/unix/types_netbsd.go | 232 + .../golang.org/x/sys/unix/types_openbsd.go | 244 + .../golang.org/x/sys/unix/types_solaris.go | 269 + .../x/sys/unix/zerrors_darwin_386.go | 1576 + .../x/sys/unix/zerrors_darwin_amd64.go | 1576 + .../x/sys/unix/zerrors_darwin_arm.go | 1293 + .../x/sys/unix/zerrors_darwin_arm64.go | 1576 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 1568 + .../x/sys/unix/zerrors_freebsd_386.go | 1743 + .../x/sys/unix/zerrors_freebsd_amd64.go | 1748 + .../x/sys/unix/zerrors_freebsd_arm.go | 1729 + .../x/sys/unix/zerrors_linux_386.go | 2156 + .../x/sys/unix/zerrors_linux_amd64.go | 2157 + .../x/sys/unix/zerrors_linux_arm.go | 2161 + .../x/sys/unix/zerrors_linux_arm64.go | 2146 + .../x/sys/unix/zerrors_linux_mips.go | 2165 + .../x/sys/unix/zerrors_linux_mips64.go | 2165 + .../x/sys/unix/zerrors_linux_mips64le.go | 2165 + .../x/sys/unix/zerrors_linux_mipsle.go | 2165 + .../x/sys/unix/zerrors_linux_ppc64.go | 2219 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2219 + .../x/sys/unix/zerrors_linux_s390x.go | 2218 + .../x/sys/unix/zerrors_linux_sparc64.go | 2142 + .../x/sys/unix/zerrors_netbsd_386.go | 1712 + .../x/sys/unix/zerrors_netbsd_amd64.go | 1702 + .../x/sys/unix/zerrors_netbsd_arm.go | 1688 + .../x/sys/unix/zerrors_openbsd_386.go | 1584 + .../x/sys/unix/zerrors_openbsd_amd64.go | 1583 + .../x/sys/unix/zerrors_solaris_amd64.go | 1483 + .../x/sys/unix/zsyscall_darwin_386.go | 1394 + .../x/sys/unix/zsyscall_darwin_amd64.go | 1409 + .../x/sys/unix/zsyscall_darwin_arm.go | 1394 + .../x/sys/unix/zsyscall_darwin_arm64.go | 1394 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1393 + .../x/sys/unix/zsyscall_freebsd_386.go | 1617 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 1617 + .../x/sys/unix/zsyscall_freebsd_arm.go | 1617 + .../x/sys/unix/zsyscall_linux_386.go | 1927 + .../x/sys/unix/zsyscall_linux_amd64.go | 2120 + .../x/sys/unix/zsyscall_linux_arm.go | 2029 + .../x/sys/unix/zsyscall_linux_arm64.go | 2003 + .../x/sys/unix/zsyscall_linux_mips.go | 2085 + .../x/sys/unix/zsyscall_linux_mips64.go | 2079 + .../x/sys/unix/zsyscall_linux_mips64le.go | 2079 + .../x/sys/unix/zsyscall_linux_mipsle.go | 2085 + .../x/sys/unix/zsyscall_linux_ppc64.go | 2131 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 2131 + .../x/sys/unix/zsyscall_linux_s390x.go | 1911 + .../x/sys/unix/zsyscall_linux_sparc64.go | 1833 + .../x/sys/unix/zsyscall_netbsd_386.go | 1299 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 1299 + .../x/sys/unix/zsyscall_netbsd_arm.go | 1299 + .../x/sys/unix/zsyscall_openbsd_386.go | 1357 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 1357 + .../x/sys/unix/zsyscall_solaris_amd64.go | 1589 + .../golang.org/x/sys/unix/zsysctl_openbsd.go | 270 + 
.../x/sys/unix/zsysnum_darwin_386.go | 398 + .../x/sys/unix/zsysnum_darwin_amd64.go | 398 + .../x/sys/unix/zsysnum_darwin_arm.go | 358 + .../x/sys/unix/zsysnum_darwin_arm64.go | 398 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 + .../x/sys/unix/zsysnum_freebsd_386.go | 351 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 351 + .../x/sys/unix/zsysnum_freebsd_arm.go | 351 + .../x/sys/unix/zsysnum_linux_386.go | 388 + .../x/sys/unix/zsysnum_linux_amd64.go | 341 + .../x/sys/unix/zsysnum_linux_arm.go | 361 + .../x/sys/unix/zsysnum_linux_arm64.go | 285 + .../x/sys/unix/zsysnum_linux_mips.go | 374 + .../x/sys/unix/zsysnum_linux_mips64.go | 334 + .../x/sys/unix/zsysnum_linux_mips64le.go | 334 + .../x/sys/unix/zsysnum_linux_mipsle.go | 374 + .../x/sys/unix/zsysnum_linux_ppc64.go | 369 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 369 + .../x/sys/unix/zsysnum_linux_s390x.go | 331 + .../x/sys/unix/zsysnum_linux_sparc64.go | 348 + .../x/sys/unix/zsysnum_netbsd_386.go | 273 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 273 + .../x/sys/unix/zsysnum_netbsd_arm.go | 273 + .../x/sys/unix/zsysnum_openbsd_386.go | 207 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 207 + .../x/sys/unix/zsysnum_solaris_amd64.go | 13 + .../x/sys/unix/ztypes_darwin_386.go | 447 + .../x/sys/unix/ztypes_darwin_amd64.go | 462 + .../x/sys/unix/ztypes_darwin_arm.go | 449 + .../x/sys/unix/ztypes_darwin_arm64.go | 457 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 443 + .../x/sys/unix/ztypes_freebsd_386.go | 502 + .../x/sys/unix/ztypes_freebsd_amd64.go | 505 + .../x/sys/unix/ztypes_freebsd_arm.go | 497 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 678 + .../x/sys/unix/ztypes_linux_amd64.go | 696 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 667 + .../x/sys/unix/ztypes_linux_arm64.go | 675 + .../x/sys/unix/ztypes_linux_mips.go | 672 + .../x/sys/unix/ztypes_linux_mips64.go | 677 + .../x/sys/unix/ztypes_linux_mips64le.go | 677 + .../x/sys/unix/ztypes_linux_mipsle.go | 672 + .../x/sys/unix/ztypes_linux_ppc64.go | 685 + .../x/sys/unix/ztypes_linux_ppc64le.go | 685 + .../x/sys/unix/ztypes_linux_s390x.go | 702 + .../x/sys/unix/ztypes_linux_sparc64.go | 666 + .../x/sys/unix/ztypes_netbsd_386.go | 396 + .../x/sys/unix/ztypes_netbsd_amd64.go | 403 + .../x/sys/unix/ztypes_netbsd_arm.go | 401 + .../x/sys/unix/ztypes_openbsd_386.go | 441 + .../x/sys/unix/ztypes_openbsd_amd64.go | 448 + .../x/sys/unix/ztypes_solaris_amd64.go | 442 + .../golang.org/x/text/internal/gen/LICENSE | 27 + .../golang.org/x/text/internal/gen/code.go | 351 + .../golang.org/x/text/internal/gen/gen.go | 281 + .../x/text/internal/triegen/LICENSE | 27 + .../x/text/internal/triegen/compact.go | 58 + .../x/text/internal/triegen/print.go | 251 + .../x/text/internal/triegen/triegen.go | 494 + .../golang.org/x/text/internal/ucd/LICENSE | 27 + .../golang.org/x/text/internal/ucd/ucd.go | 376 + .../golang.org/x/text/transform/LICENSE | 27 + .../golang.org/x/text/transform/transform.go | 705 + .../golang.org/x/text/unicode/cldr/LICENSE | 27 + .../golang.org/x/text/unicode/cldr/base.go | 100 + .../golang.org/x/text/unicode/cldr/cldr.go | 130 + .../golang.org/x/text/unicode/cldr/collate.go | 359 + .../golang.org/x/text/unicode/cldr/decode.go | 171 + .../golang.org/x/text/unicode/cldr/makexml.go | 400 + .../golang.org/x/text/unicode/cldr/resolve.go | 602 + .../golang.org/x/text/unicode/cldr/slice.go | 144 + .../golang.org/x/text/unicode/cldr/xml.go | 1487 + .../golang.org/x/text/unicode/norm/LICENSE | 27 + .../x/text/unicode/norm/composition.go | 508 + .../x/text/unicode/norm/forminfo.go | 259 
+ .../golang.org/x/text/unicode/norm/input.go | 109 + .../golang.org/x/text/unicode/norm/iter.go | 457 + .../x/text/unicode/norm/maketables.go | 976 + .../x/text/unicode/norm/normalize.go | 609 + .../x/text/unicode/norm/readwriter.go | 125 + .../golang.org/x/text/unicode/norm/tables.go | 7631 +++ .../x/text/unicode/norm/transform.go | 88 + .../golang.org/x/text/unicode/norm/trie.go | 54 + .../golang.org/x/text/unicode/norm/triegen.go | 117 + octopus/vendor/gopkg.in/yaml.v2/LICENSE | 13 + octopus/vendor/gopkg.in/yaml.v2/apic.go | 742 + octopus/vendor/gopkg.in/yaml.v2/decode.go | 682 + octopus/vendor/gopkg.in/yaml.v2/emitterc.go | 1684 + octopus/vendor/gopkg.in/yaml.v2/encode.go | 306 + octopus/vendor/gopkg.in/yaml.v2/parserc.go | 1095 + octopus/vendor/gopkg.in/yaml.v2/readerc.go | 394 + octopus/vendor/gopkg.in/yaml.v2/resolve.go | 208 + octopus/vendor/gopkg.in/yaml.v2/scannerc.go | 2710 ++ octopus/vendor/gopkg.in/yaml.v2/sorter.go | 104 + octopus/vendor/gopkg.in/yaml.v2/writerc.go | 89 + octopus/vendor/gopkg.in/yaml.v2/yaml.go | 346 + octopus/vendor/gopkg.in/yaml.v2/yamlh.go | 716 + .../vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 + octopus/vendor/manifest | 391 + 655 files changed, 298360 insertions(+) create mode 100644 octopus/vendor/github.com/armon/consul-api/LICENSE create mode 100644 octopus/vendor/github.com/armon/consul-api/acl.go create mode 100644 octopus/vendor/github.com/armon/consul-api/agent.go create mode 100644 octopus/vendor/github.com/armon/consul-api/api.go create mode 100644 octopus/vendor/github.com/armon/consul-api/catalog.go create mode 100644 octopus/vendor/github.com/armon/consul-api/event.go create mode 100644 octopus/vendor/github.com/armon/consul-api/health.go create mode 100644 octopus/vendor/github.com/armon/consul-api/kv.go create mode 100644 octopus/vendor/github.com/armon/consul-api/session.go create mode 100644 octopus/vendor/github.com/armon/consul-api/status.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/LICENSE create mode 100644 octopus/vendor/github.com/coreos/etcd/client/auth_role.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/auth_user.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/cancelreq.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/client.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/cluster_error.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/curl.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/discover.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/doc.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/integration/doc.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/keys.generated.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/keys.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/members.go create mode 100644 octopus/vendor/github.com/coreos/etcd/client/util.go create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/pathutil/path.go create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/srv/LICENSE create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/srv/srv.go create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/types/LICENSE create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/types/doc.go create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/types/id.go create mode 100644 
octopus/vendor/github.com/coreos/etcd/pkg/types/set.go create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/types/slice.go create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/types/urls.go create mode 100644 octopus/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go create mode 100644 octopus/vendor/github.com/coreos/etcd/version/LICENSE create mode 100644 octopus/vendor/github.com/coreos/etcd/version/version.go create mode 100644 octopus/vendor/github.com/coreos/go-semver/semver/LICENSE create mode 100644 octopus/vendor/github.com/coreos/go-semver/semver/semver.go create mode 100644 octopus/vendor/github.com/coreos/go-semver/semver/sort.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/md2man/LICENSE.md create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/LICENSE.md create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/block.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/html.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/inline.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/latex.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/markdown.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/smartypants.go create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE.md create mode 100644 octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/shurcooL/sanitized_anchor_name/main.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/LICENSE create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/fen.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/fsnotify.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/inotify.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/inotify_poller.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go create mode 100644 octopus/vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/LICENSE create mode 100644 octopus/vendor/github.com/hashicorp/hcl/decoder.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create 
mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/json/parser/flatten.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/json/parser/parser.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/json/token/position.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/json/token/token.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/lex.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/parse.go create mode 100644 octopus/vendor/github.com/hashicorp/hcl/testhelper/unix2dos.go create mode 100644 octopus/vendor/github.com/inconshreveable/mousetrap/LICENSE create mode 100644 octopus/vendor/github.com/inconshreveable/mousetrap/trap_others.go create mode 100644 octopus/vendor/github.com/inconshreveable/mousetrap/trap_windows.go create mode 100644 octopus/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go create mode 100644 octopus/vendor/github.com/kr/fs/LICENSE create mode 100644 octopus/vendor/github.com/kr/fs/filesystem.go create mode 100644 octopus/vendor/github.com/kr/fs/walk.go create mode 100644 octopus/vendor/github.com/magiconair/properties/LICENSE create mode 100644 octopus/vendor/github.com/magiconair/properties/assert/assert.go create mode 100644 octopus/vendor/github.com/magiconair/properties/decode.go create mode 100644 octopus/vendor/github.com/magiconair/properties/doc.go create mode 100644 octopus/vendor/github.com/magiconair/properties/integrate.go create mode 100644 octopus/vendor/github.com/magiconair/properties/lex.go create mode 100644 octopus/vendor/github.com/magiconair/properties/load.go create mode 100644 octopus/vendor/github.com/magiconair/properties/parser.go create mode 100644 octopus/vendor/github.com/magiconair/properties/properties.go create mode 100644 octopus/vendor/github.com/magiconair/properties/rangecheck.go create mode 100644 octopus/vendor/github.com/mitchellh/go-homedir/LICENSE create mode 100644 octopus/vendor/github.com/mitchellh/go-homedir/homedir.go create mode 100644 octopus/vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 octopus/vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 octopus/vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 octopus/vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/LICENSE create mode 100644 octopus/vendor/github.com/pelletier/go-toml/cmd/test_program.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/doc.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/keysparsing.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/lexer.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/marshal.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/parser.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/position.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/query/doc.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/query/lexer.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/query/match.go create 
mode 100644 octopus/vendor/github.com/pelletier/go-toml/query/parser.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/query/query.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/query/tokens.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/token.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/toml.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/tomltree_create.go create mode 100644 octopus/vendor/github.com/pelletier/go-toml/tomltree_write.go create mode 100644 octopus/vendor/github.com/pkg/errors/LICENSE create mode 100644 octopus/vendor/github.com/pkg/errors/errors.go create mode 100644 octopus/vendor/github.com/pkg/errors/stack.go create mode 100644 octopus/vendor/github.com/pkg/sftp/LICENSE create mode 100644 octopus/vendor/github.com/pkg/sftp/attrs.go create mode 100644 octopus/vendor/github.com/pkg/sftp/attrs_stubs.go create mode 100644 octopus/vendor/github.com/pkg/sftp/attrs_unix.go create mode 100644 octopus/vendor/github.com/pkg/sftp/client.go create mode 100644 octopus/vendor/github.com/pkg/sftp/conn.go create mode 100644 octopus/vendor/github.com/pkg/sftp/debug.go create mode 100644 octopus/vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go create mode 100644 octopus/vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go create mode 100644 octopus/vendor/github.com/pkg/sftp/examples/request-server/main.go create mode 100644 octopus/vendor/github.com/pkg/sftp/examples/sftp-server/main.go create mode 100644 octopus/vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go create mode 100644 octopus/vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go create mode 100644 octopus/vendor/github.com/pkg/sftp/match.go create mode 100644 octopus/vendor/github.com/pkg/sftp/packet-manager.go create mode 100644 octopus/vendor/github.com/pkg/sftp/packet-manager_go1.8.go create mode 100644 octopus/vendor/github.com/pkg/sftp/packet-manager_legacy.go create mode 100644 octopus/vendor/github.com/pkg/sftp/packet-typing.go create mode 100644 octopus/vendor/github.com/pkg/sftp/packet.go create mode 100644 octopus/vendor/github.com/pkg/sftp/release.go create mode 100644 octopus/vendor/github.com/pkg/sftp/request-example.go create mode 100644 octopus/vendor/github.com/pkg/sftp/request-interfaces.go create mode 100644 octopus/vendor/github.com/pkg/sftp/request-server.go create mode 100644 octopus/vendor/github.com/pkg/sftp/request-unix.go create mode 100644 octopus/vendor/github.com/pkg/sftp/request.go create mode 100644 octopus/vendor/github.com/pkg/sftp/request_windows.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server_standalone/main.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server_statvfs_darwin.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server_statvfs_impl.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server_statvfs_linux.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server_statvfs_stubs.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server_stubs.go create mode 100644 octopus/vendor/github.com/pkg/sftp/server_unix.go create mode 100644 octopus/vendor/github.com/pkg/sftp/sftp.go create mode 100644 octopus/vendor/github.com/spf13/afero/LICENSE.txt create mode 100644 octopus/vendor/github.com/spf13/afero/afero.go create mode 100644 octopus/vendor/github.com/spf13/afero/basepath.go create mode 100644 
octopus/vendor/github.com/spf13/afero/cacheOnReadFs.go create mode 100644 octopus/vendor/github.com/spf13/afero/const_bsds.go create mode 100644 octopus/vendor/github.com/spf13/afero/const_win_unix.go create mode 100644 octopus/vendor/github.com/spf13/afero/copyOnWriteFs.go create mode 100644 octopus/vendor/github.com/spf13/afero/httpFs.go create mode 100644 octopus/vendor/github.com/spf13/afero/ioutil.go create mode 100644 octopus/vendor/github.com/spf13/afero/mem/dir.go create mode 100644 octopus/vendor/github.com/spf13/afero/mem/dirmap.go create mode 100644 octopus/vendor/github.com/spf13/afero/mem/file.go create mode 100644 octopus/vendor/github.com/spf13/afero/memmap.go create mode 100644 octopus/vendor/github.com/spf13/afero/memradix.go create mode 100644 octopus/vendor/github.com/spf13/afero/os.go create mode 100644 octopus/vendor/github.com/spf13/afero/path.go create mode 100644 octopus/vendor/github.com/spf13/afero/readonlyfs.go create mode 100644 octopus/vendor/github.com/spf13/afero/regexpfs.go create mode 100644 octopus/vendor/github.com/spf13/afero/sftpfs/file.go create mode 100644 octopus/vendor/github.com/spf13/afero/sftpfs/sftp.go create mode 100644 octopus/vendor/github.com/spf13/afero/unionFile.go create mode 100644 octopus/vendor/github.com/spf13/afero/util.go create mode 100644 octopus/vendor/github.com/spf13/cast/LICENSE create mode 100644 octopus/vendor/github.com/spf13/cast/cast.go create mode 100644 octopus/vendor/github.com/spf13/cast/caste.go create mode 100644 octopus/vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 octopus/vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/add.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/init.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/project.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/cmd/root.go create mode 100644 octopus/vendor/github.com/spf13/cobra/cobra/main.go create mode 100644 octopus/vendor/github.com/spf13/cobra/command.go create mode 100644 octopus/vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 octopus/vendor/github.com/spf13/cobra/command_win.go create mode 100644 octopus/vendor/github.com/spf13/cobra/doc/man_docs.go create mode 100644 octopus/vendor/github.com/spf13/cobra/doc/md_docs.go create mode 100644 octopus/vendor/github.com/spf13/cobra/doc/util.go create mode 100644 octopus/vendor/github.com/spf13/cobra/doc/yaml_docs.go create mode 100644 octopus/vendor/github.com/spf13/jwalterweatherman/LICENSE create mode 100644 
octopus/vendor/github.com/spf13/jwalterweatherman/default_notepad.go create mode 100644 octopus/vendor/github.com/spf13/jwalterweatherman/log_counter.go create mode 100644 octopus/vendor/github.com/spf13/jwalterweatherman/notepad.go create mode 100644 octopus/vendor/github.com/spf13/pflag/LICENSE create mode 100644 octopus/vendor/github.com/spf13/pflag/bool.go create mode 100644 octopus/vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 octopus/vendor/github.com/spf13/pflag/count.go create mode 100644 octopus/vendor/github.com/spf13/pflag/duration.go create mode 100644 octopus/vendor/github.com/spf13/pflag/flag.go create mode 100644 octopus/vendor/github.com/spf13/pflag/float32.go create mode 100644 octopus/vendor/github.com/spf13/pflag/float64.go create mode 100644 octopus/vendor/github.com/spf13/pflag/golangflag.go create mode 100644 octopus/vendor/github.com/spf13/pflag/int.go create mode 100644 octopus/vendor/github.com/spf13/pflag/int32.go create mode 100644 octopus/vendor/github.com/spf13/pflag/int64.go create mode 100644 octopus/vendor/github.com/spf13/pflag/int8.go create mode 100644 octopus/vendor/github.com/spf13/pflag/int_slice.go create mode 100644 octopus/vendor/github.com/spf13/pflag/ip.go create mode 100644 octopus/vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 octopus/vendor/github.com/spf13/pflag/ipmask.go create mode 100644 octopus/vendor/github.com/spf13/pflag/ipnet.go create mode 100644 octopus/vendor/github.com/spf13/pflag/string.go create mode 100644 octopus/vendor/github.com/spf13/pflag/string_array.go create mode 100644 octopus/vendor/github.com/spf13/pflag/string_slice.go create mode 100644 octopus/vendor/github.com/spf13/pflag/uint.go create mode 100644 octopus/vendor/github.com/spf13/pflag/uint16.go create mode 100644 octopus/vendor/github.com/spf13/pflag/uint32.go create mode 100644 octopus/vendor/github.com/spf13/pflag/uint64.go create mode 100644 octopus/vendor/github.com/spf13/pflag/uint8.go create mode 100644 octopus/vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 octopus/vendor/github.com/spf13/viper/LICENSE create mode 100644 octopus/vendor/github.com/spf13/viper/flags.go create mode 100644 octopus/vendor/github.com/spf13/viper/remote/remote.go create mode 100644 octopus/vendor/github.com/spf13/viper/util.go create mode 100644 octopus/vendor/github.com/spf13/viper/viper.go create mode 100644 octopus/vendor/github.com/stretchr/testify/LICENCE.txt create mode 100644 octopus/vendor/github.com/stretchr/testify/LICENSE create mode 100644 octopus/vendor/github.com/stretchr/testify/assert/assertion_format.go create mode 100644 octopus/vendor/github.com/stretchr/testify/assert/assertion_forward.go create mode 100644 octopus/vendor/github.com/stretchr/testify/assert/assertions.go create mode 100644 octopus/vendor/github.com/stretchr/testify/assert/doc.go create mode 100644 octopus/vendor/github.com/stretchr/testify/assert/errors.go create mode 100644 octopus/vendor/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 octopus/vendor/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 octopus/vendor/github.com/stretchr/testify/doc.go create mode 100644 octopus/vendor/github.com/stretchr/testify/http/doc.go create mode 100644 octopus/vendor/github.com/stretchr/testify/http/test_response_writer.go create mode 100644 octopus/vendor/github.com/stretchr/testify/http/test_round_tripper.go create mode 100644 octopus/vendor/github.com/stretchr/testify/mock/doc.go create mode 100644 
octopus/vendor/github.com/stretchr/testify/mock/mock.go create mode 100644 octopus/vendor/github.com/stretchr/testify/require/doc.go create mode 100644 octopus/vendor/github.com/stretchr/testify/require/forward_requirements.go create mode 100644 octopus/vendor/github.com/stretchr/testify/require/require.go create mode 100644 octopus/vendor/github.com/stretchr/testify/require/require_forward.go create mode 100644 octopus/vendor/github.com/stretchr/testify/require/requirements.go create mode 100644 octopus/vendor/github.com/stretchr/testify/suite/doc.go create mode 100644 octopus/vendor/github.com/stretchr/testify/suite/interfaces.go create mode 100644 octopus/vendor/github.com/stretchr/testify/suite/suite.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 octopus/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/0doc.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/LICENSE create mode 100644 octopus/vendor/github.com/ugorji/go/codec/binc.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/cbor.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/codecgen/gen.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/codecgen/z.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/decode.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/decode_go.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/decode_go14.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/encode.go create mode 100644 
octopus/vendor/github.com/ugorji/go/codec/fast-path.generated.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/fast-path.not.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/gen-helper.generated.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/gen.generated.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/gen.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/gen_15.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/gen_16.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/gen_17.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/helper.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/helper_internal.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/helper_unsafe.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/json.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/msgpack.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/noop.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/prebuild.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/rpc.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/simple.go create mode 100644 octopus/vendor/github.com/ugorji/go/codec/time.go create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/backend/LICENSE create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/backend/backend.go create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/backend/consul/consul.go create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/backend/etcd/etcd.go create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/backend/mock/mock.go create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/config/LICENSE create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/config/config.go create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/encoding/secconf/LICENSE create mode 100644 octopus/vendor/github.com/xordataexchange/crypt/encoding/secconf/secconf.go create mode 100644 octopus/vendor/golang.org/x/crypto/cast5/LICENSE create mode 100644 octopus/vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/LICENSE create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/const_amd64.h create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/const_amd64.s create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/doc.go create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/mul_amd64.s create mode 100644 octopus/vendor/golang.org/x/crypto/curve25519/square_amd64.s create mode 100644 octopus/vendor/golang.org/x/crypto/ed25519/LICENSE create mode 100644 octopus/vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 octopus/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go create mode 100644 octopus/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go create mode 100644 
octopus/vendor/golang.org/x/crypto/openpgp/LICENSE create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 octopus/vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/LICENSE create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/agent/client.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/agent/forward.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/agent/keyring.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/agent/server.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/buffer.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/certs.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/channel.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/cipher.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/client.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/client_auth.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/common.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/connection.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/doc.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/handshake.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/kex.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/keys.go create mode 100644 
octopus/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/mac.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/messages.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/mux.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/server.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/session.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/streamlocal.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/tcpip.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/terminal/util.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/test/doc.go create mode 100644 octopus/vendor/golang.org/x/crypto/ssh/transport.go create mode 100644 octopus/vendor/golang.org/x/net/context/LICENSE create mode 100644 octopus/vendor/golang.org/x/net/context/context.go create mode 100644 octopus/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 octopus/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 octopus/vendor/golang.org/x/net/context/go17.go create mode 100644 octopus/vendor/golang.org/x/net/context/pre_go17.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/LICENSE create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_darwin_386.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_darwin_arm.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_freebsd_386.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_netbsd_386.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_openbsd_386.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 octopus/vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/constants.go create mode 100644 
octopus/vendor/golang.org/x/sys/unix/dirent.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/env_unset.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/flock.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/flock_linux_32bit.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/gccgo_c.c create mode 100644 octopus/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/linux/mkall.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/linux/types.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/mkpost.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/openbsd_pledge.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/race.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/race0.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/sockcmsg_linux.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/str.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_darwin_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 
octopus/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_no_getwd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/types_darwin.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/types_dragonfly.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/types_freebsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/types_netbsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/types_openbsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/types_solaris.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 
octopus/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 
octopus/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 octopus/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 octopus/vendor/golang.org/x/text/internal/gen/LICENSE create mode 100644 octopus/vendor/golang.org/x/text/internal/gen/code.go create mode 100644 octopus/vendor/golang.org/x/text/internal/gen/gen.go create mode 100644 octopus/vendor/golang.org/x/text/internal/triegen/LICENSE create mode 100644 octopus/vendor/golang.org/x/text/internal/triegen/compact.go create mode 100644 octopus/vendor/golang.org/x/text/internal/triegen/print.go create mode 100644 octopus/vendor/golang.org/x/text/internal/triegen/triegen.go create mode 100644 octopus/vendor/golang.org/x/text/internal/ucd/LICENSE create mode 100644 octopus/vendor/golang.org/x/text/internal/ucd/ucd.go create mode 100644 octopus/vendor/golang.org/x/text/transform/LICENSE create mode 100644 octopus/vendor/golang.org/x/text/transform/transform.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/LICENSE create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/base.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/cldr.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/collate.go create mode 
100644 octopus/vendor/golang.org/x/text/unicode/cldr/decode.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/makexml.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/resolve.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/slice.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/cldr/xml.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/LICENSE create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/tables.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 octopus/vendor/golang.org/x/text/unicode/norm/triegen.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 octopus/vendor/gopkg.in/yaml.v2/apic.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/decode.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/encode.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 octopus/vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 100644 octopus/vendor/manifest diff --git a/octopus/vendor/github.com/armon/consul-api/LICENSE b/octopus/vendor/github.com/armon/consul-api/LICENSE new file mode 100644 index 0000000..f0e5c79 --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. 
+ +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/octopus/vendor/github.com/armon/consul-api/acl.go b/octopus/vendor/github.com/armon/consul-api/acl.go new file mode 100644 index 0000000..e0179f5 --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/acl.go @@ -0,0 +1,140 @@ +package consulapi + +const ( + // ACLCLientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" +) + +// ACLEntry is used to represent an ACL entry +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// Create is used to generate a new token with the given parameters +func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q 
*QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/octopus/vendor/github.com/armon/consul-api/agent.go b/octopus/vendor/github.com/armon/consul-api/agent.go new file mode 100644 index 0000000..eec93cb --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/agent.go @@ -0,0 +1,272 @@ +package consulapi + +import ( + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Check *AgentServiceCheck +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to create an associated +// check for a service +type AgentServiceCheck struct { + Script string `json:",omitempty"` + Interval string `json:",omitempty"` + TTL string `json:",omitempty"` +} + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the 
locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state +func (a *Agent) PassTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state +func (a *Agent) WarnTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state +func (a *Agent) FailTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "fail") +} + +// UpdateTTL is used to update the TTL of a check +func (a *Agent) UpdateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + 
return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/octopus/vendor/github.com/armon/consul-api/api.go b/octopus/vendor/github.com/armon/consul-api/api.go new file mode 100644 index 0000000..e133576 --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/api.go @@ -0,0 +1,323 @@ +package consulapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overriden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. 
+ Token string +} + +// DefaultConfig returns a default configuration for the client +func DefaultConfig() *Config { + return &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + HttpClient: http.DefaultClient, + } +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.HttpClient == nil { + config.HttpClient = defConfig.HttpClient + } + + client := &Client{ + config: *config, + } + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + obj interface{} +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } +} + +// durToMsec converts a duration to a millisecond specified string +func durToMsec(dur time.Duration) string { + return fmt.Sprintf("%dms", dur/time.Millisecond) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Get the url sring + urlRaw := r.url.String() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + if b, err := encodeBody(r.obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, urlRaw, r.body) + + // Setup auth + if err == nil && r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + + return req, err +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.params.Set("token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Now().Sub(start) + return diff, resp, err +} + +// parseQueryMeta is used to help parse query 
meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + return d, resp, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return d, resp, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, e +} diff --git a/octopus/vendor/github.com/armon/consul-api/catalog.go b/octopus/vendor/github.com/armon/consul-api/catalog.go new file mode 100644 index 0000000..8080e2a --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/catalog.go @@ -0,0 +1,181 @@ +package consulapi + +type Node struct { + Node string + Address string +} + +type CatalogService struct { + Node string + Address string + ServiceID string + ServiceName string + ServiceTags []string + ServicePort int +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + Node string + Address string + Datacenter string + Service *AgentService + Check *AgentCheck +} + +type CatalogDeregistration struct { + Node string + Address string + Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := 
requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/octopus/vendor/github.com/armon/consul-api/event.go b/octopus/vendor/github.com/armon/consul-api/event.go new file mode 100644 index 0000000..59813d4 --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/event.go @@ -0,0 +1,104 @@ +package consulapi + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. 
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { + r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) + r.setWriteOptions(q) + if params.NodeFilter != "" { + r.params.Set("node", params.NodeFilter) + } + if params.ServiceFilter != "" { + r.params.Set("service", params.ServiceFilter) + } + if params.TagFilter != "" { + r.params.Set("tag", params.TagFilter) + } + if params.Payload != nil { + r.body = bytes.NewReader(params.Payload) + } + + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out UserEvent + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// List is used to get the most recent events an agent has received. +// This list can be optionally filtered by the name. This endpoint supports +// quasi-blocking queries. The index is not monotonic, nor does it provide provide +// LastContact or KnownLeader. +func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { + r := e.c.newRequest("GET", "/v1/event/list") + r.setQueryOptions(q) + if name != "" { + r.params.Set("name", name) + } + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*UserEvent + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// IDToIndex is a bit of a hack. This simulates the index generation to +// convert an event ID into a WaitIndex. +func (e *Event) IDToIndex(uuid string) uint64 { + lower := uuid[0:8] + uuid[9:13] + uuid[14:18] + upper := uuid[19:23] + uuid[24:36] + lowVal, err := strconv.ParseUint(lower, 16, 64) + if err != nil { + panic("Failed to convert " + lower) + } + highVal, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + panic("Failed to convert " + upper) + } + return lowVal ^ highVal +} diff --git a/octopus/vendor/github.com/armon/consul-api/health.go b/octopus/vendor/github.com/armon/consul-api/health.go new file mode 100644 index 0000000..574801e --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/health.go @@ -0,0 +1,136 @@ +package consulapi + +import ( + "fmt" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// ServiceEntry is used for the health service endpoint +type ServiceEntry struct { + Node *Node + Service *AgentService + Checks []*HealthCheck +} + +// Health can be used to query the Health endpoints +type Health struct { + c *Client +} + +// Health returns a handle to the health endpoints +func (c *Client) Health() *Health { + return &Health{c} +} + +// Node is used to query for checks belonging to a given node +func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Checks is used to return the checks associated with a 
service +func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/checks/"+service) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query health information along with service info +// for a given service. It can optionally do server-side filtering on a tag +// or nodes with passing health checks only. +func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + if passingOnly { + r.params.Set("passing", "1") + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*ServiceEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// State is used to retreive all the checks in a given state. +// The wildcard "any" state can also be used for all checks. +func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + switch state { + case "any": + case "warning": + case "critical": + case "passing": + case "unknown": + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/octopus/vendor/github.com/armon/consul-api/kv.go b/octopus/vendor/github.com/armon/consul-api/kv.go new file mode 100644 index 0000000..98c3b1a --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/kv.go @@ -0,0 +1,219 @@ +package consulapi + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + Key string + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Value []byte + Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key +func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { + resp, qm, err := k.getInternal(key, nil, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to lookup all keys under a prefix +func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { + resp, qm, err := 
k.getInternal(prefix, map[string]string{"recurse": ""}, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Keys is used to list all the keys under a prefix. Optionally, +// a separator can be used to limit the responses. +func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { + params := map[string]string{"keys": ""} + if separator != "" { + params["separator"] = separator + } + resp, qm, err := k.getInternal(prefix, params, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []string + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { + r := k.c.newRequest("GET", "/v1/kv/"+key) + r.setQueryOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + resp.Body.Close() + return nil, qm, nil + } else if resp.StatusCode != 200 { + resp.Body.Close() + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + return resp, qm, nil +} + +// Put is used to write a new value. Only the +// Key, Flags and Value is respected. +func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { + params := make(map[string]string, 1) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + _, wm, err := k.put(p.Key, params, p.Value, q) + return wm, err +} + +// CAS is used for a Check-And-Set operation. The Key, +// ModifyIndex, Flags and Value are respected. Returns true +// on success or false on failures. +func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) + return k.put(p.Key, params, p.Value, q) +} + +// Acquire is used for a lock acquisiiton operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["acquire"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +// Release is used for a lock release operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. 
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + return k.deleteInternal(key, nil, w) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + return k.deleteInternal(prefix, []string{"recurse"}, w) +} + +func (k *KV) deleteInternal(key string, params []string, q *WriteOptions) (*WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+key) + r.setWriteOptions(q) + for _, param := range params { + r.params.Set(param, "") + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + return qm, nil +} diff --git a/octopus/vendor/github.com/armon/consul-api/session.go b/octopus/vendor/github.com/armon/consul-api/session.go new file mode 100644 index 0000000..4fbfc5e --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/session.go @@ -0,0 +1,204 @@ +package consulapi + +import ( + "time" +) + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. 
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/create") + r.setWriteOptions(q) + r.obj = obj + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalides a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, wm, err + } + + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// Info looks up a single session +func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/session/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List gets sessions for a node +func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/session/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// List gets all active sessions +func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/session/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + 
return nil, nil, err + } + return entries, qm, nil +} diff --git a/octopus/vendor/github.com/armon/consul-api/status.go b/octopus/vendor/github.com/armon/consul-api/status.go new file mode 100644 index 0000000..21c3198 --- /dev/null +++ b/octopus/vendor/github.com/armon/consul-api/status.go @@ -0,0 +1,43 @@ +package consulapi + +// Status can be used to query the Status endpoints +type Status struct { + c *Client +} + +// Status returns a handle to the status endpoints +func (c *Client) Status() *Status { + return &Status{c} +} + +// Leader is used to query for a known leader +func (s *Status) Leader() (string, error) { + r := s.c.newRequest("GET", "/v1/status/leader") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var leader string + if err := decodeBody(resp, &leader); err != nil { + return "", err + } + return leader, nil +} + +// Peers is used to query for a known raft peers +func (s *Status) Peers() ([]string, error) { + r := s.c.newRequest("GET", "/v1/status/peers") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var peers []string + if err := decodeBody(resp, &peers); err != nil { + return nil, err + } + return peers, nil +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/LICENSE b/octopus/vendor/github.com/coreos/etcd/client/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/octopus/vendor/github.com/coreos/etcd/client/auth_role.go b/octopus/vendor/github.com/coreos/etcd/client/auth_role.go new file mode 100644 index 0000000..d15e00d --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/auth_role.go @@ -0,0 +1,237 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + + "golang.org/x/net/context" +) + +type Role struct { + Role string `json:"role"` + Permissions Permissions `json:"permissions"` + Grant *Permissions `json:"grant,omitempty"` + Revoke *Permissions `json:"revoke,omitempty"` +} + +type Permissions struct { + KV rwPermission `json:"kv"` +} + +type rwPermission struct { + Read []string `json:"read"` + Write []string `json:"write"` +} + +type PermissionType int + +const ( + ReadPermission PermissionType = iota + WritePermission + ReadWritePermission +) + +// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to +// interact with etcd's role creation and modification features. +func NewAuthRoleAPI(c Client) AuthRoleAPI { + return &httpAuthRoleAPI{ + client: c, + } +} + +type AuthRoleAPI interface { + // AddRole adds a role. + AddRole(ctx context.Context, role string) error + + // RemoveRole removes a role. + RemoveRole(ctx context.Context, role string) error + + // GetRole retrieves role details. + GetRole(ctx context.Context, role string) (*Role, error) + + // GrantRoleKV grants a role some permission prefixes for the KV store. + GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // RevokeRoleKV revokes some permission prefixes for a role on the KV store. + RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // ListRoles lists roles. + ListRoles(ctx context.Context) ([]string, error) +} + +type httpAuthRoleAPI struct { + client httpClient +} + +type authRoleAPIAction struct { + verb string + name string + role *Role +} + +type authRoleAPIList struct{} + +func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", l.name) + if l.role == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.role) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { + resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + var roleList struct { + Roles []Role `json:"roles"` + } + if err = json.Unmarshal(body, &roleList); err != nil { + return nil, err + } + ret := make([]string, 0, len(roleList.Roles)) + for _, r := range roleList.Roles { + ret = append(ret, r.Role) + } + return ret, nil +} + +func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { + role := &Role{ + Role: rolename, + } + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "DELETE", + name: rolename, + }) +} + +func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return err + } + if err := assertStatusCode(resp.StatusCode, 
http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err := json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { + return r.modRole(ctx, &authRoleAPIAction{ + verb: "GET", + name: rolename, + }) +} + +func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { + var out rwPermission + switch permType { + case ReadPermission: + out.Read = prefixes + case WritePermission: + out.Write = prefixes + case ReadWritePermission: + out.Read = prefixes + out.Write = prefixes + } + return out +} + +func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Grant: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Revoke: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var role Role + if err = json.Unmarshal(body, &role); err != nil { + return nil, err + } + return &role, nil +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/auth_user.go b/octopus/vendor/github.com/coreos/etcd/client/auth_user.go new file mode 100644 index 0000000..97c3f31 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/auth_user.go @@ -0,0 +1,320 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "path" + + "golang.org/x/net/context" +) + +var ( + defaultV2AuthPrefix = "/v2/auth" +) + +type User struct { + User string `json:"user"` + Password string `json:"password,omitempty"` + Roles []string `json:"roles"` + Grant []string `json:"grant,omitempty"` + Revoke []string `json:"revoke,omitempty"` +} + +// userListEntry is the user representation given by the server for ListUsers +type userListEntry struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +type UserRoles struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +func v2AuthURL(ep url.URL, action string, name string) *url.URL { + if name != "" { + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) + return &ep + } + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) + return &ep +} + +// NewAuthAPI constructs a new AuthAPI that uses HTTP to +// interact with etcd's general auth features. +func NewAuthAPI(c Client) AuthAPI { + return &httpAuthAPI{ + client: c, + } +} + +type AuthAPI interface { + // Enable auth. + Enable(ctx context.Context) error + + // Disable auth. + Disable(ctx context.Context) error +} + +type httpAuthAPI struct { + client httpClient +} + +func (s *httpAuthAPI) Enable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"PUT"}) +} + +func (s *httpAuthAPI) Disable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"DELETE"}) +} + +func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { + resp, body, err := s.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +type authAPIAction struct { + verb string +} + +func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "enable", "") + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. +func NewAuthUserAPI(c Client) AuthUserAPI { + return &httpAuthUserAPI{ + client: c, + } +} + +type AuthUserAPI interface { + // AddUser adds a user. + AddUser(ctx context.Context, username string, password string) error + + // RemoveUser removes a user. + RemoveUser(ctx context.Context, username string) error + + // GetUser retrieves user details. + GetUser(ctx context.Context, username string) (*User, error) + + // GrantUser grants a user some permission roles. + GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // RevokeUser revokes some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // ChangePassword changes the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // ListUsers lists the users. 
+ ListUsers(ctx context.Context) ([]string, error) +} + +type httpAuthUserAPI struct { + client httpClient +} + +type authUserAPIAction struct { + verb string + username string + user *User +} + +type authUserAPIList struct{} + +func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", l.username) + if l.user == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.user) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { + resp, body, err := u.client.Do(ctx, &authUserAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + + var userList struct { + Users []userListEntry `json:"users"` + } + + if err = json.Unmarshal(body, &userList); err != nil { + return nil, err + } + + ret := make([]string, 0, len(userList.Users)) + for _, u := range userList.Users { + ret = append(ret, u.User) + } + return ret, nil +} + +func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { + user := &User{ + User: username, + Password: password, + } + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "DELETE", + username: username, + }) +} + +func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { + return u.modUser(ctx, &authUserAPIAction{ + verb: "GET", + username: username, + }) +} + +func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Grant: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Revoke: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { + user := &User{ + User: username, + Password: password, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return nil, 
err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var user User + if err = json.Unmarshal(body, &user); err != nil { + var userR UserRoles + if urerr := json.Unmarshal(body, &userR); urerr != nil { + return nil, err + } + user.User = userR.User + for _, r := range userR.Roles { + user.Roles = append(user.Roles, r.Role) + } + } + return &user, nil +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/cancelreq.go b/octopus/vendor/github.com/coreos/etcd/client/cancelreq.go new file mode 100644 index 0000000..76d1f04 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/client.go b/octopus/vendor/github.com/coreos/etcd/client/client.go new file mode 100644 index 0000000..498dfbc --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/client.go @@ -0,0 +1,703 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "sync" + "time" + + "github.com/coreos/etcd/version" + + "golang.org/x/net/context" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. + // As the name implies, the client object will pick a node from the members + // of the cluster in a random fashion. If the cluster has three members, A, B, + // and C, the client picks any node from its three members as its request + // destination. 
+ EndpointSelectionRandom EndpointSelectionMode = iota + + // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', + // requests are sent directly to the cluster leader. This reduces + // forwarding roundtrips compared to making requests to etcd followers + // who then forward them to the cluster leader. In the event of a leader + // failure, however, clients configured this way cannot prioritize among + // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' + // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to + // maintain its knowledge of current cluster state. + // + // This mode should be used with Client.AutoSync(). + EndpointSelectionPrioritizeLeader +) + +type Config struct { + // Endpoints defines a set of URLs (schemes, hosts and ports only) + // that can be used to communicate with a logical etcd cluster. For + // example, a three-node cluster could be provided like so: + // + // Endpoints: []string{ + // "http://node1.example.com:2379", + // "http://node2.example.com:2379", + // "http://node3.example.com:2379", + // } + // + // If multiple endpoints are provided, the Client will attempt to + // use them all in the event that one or more of them are unusable. + // + // If Client.Sync is ever called, the Client may cache an alternate + // set of endpoints to continue operation. + Endpoints []string + + // Transport is used by the Client to drive HTTP requests. If not + // provided, DefaultTransport will be used. + Transport CancelableTransport + + // CheckRedirect specifies the policy for handling HTTP redirects. + // If CheckRedirect is not nil, the Client calls it before + // following an HTTP redirect. The sole argument is the number of + // requests that have already been made. If CheckRedirect returns + // an error, Client.Do will not make any further requests and return + // the error back it to the caller. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect CheckRedirectFunc + + // Username specifies the user credential to add as an authorization header + Username string + + // Password is the password for the specified user to add as an authorization header + // to the request. + Password string + + // HeaderTimeoutPerRequest specifies the time limit to wait for response + // header in a single request made by the Client. The timeout includes + // connection time, any redirects, and header wait time. + // + // For non-watch GET request, server returns the response body immediately. + // For PUT/POST/DELETE request, server will attempt to commit request + // before responding, which is expected to take `100ms + 2 * RTT`. + // For watch request, server returns the header immediately to notify Client + // watch start. But if server is behind some kind of proxy, the response + // header may be cached at proxy, and Client cannot rely on this behavior. + // + // Especially, wait request will ignore this timeout. + // + // One API call may send multiple requests to different etcd servers until it + // succeeds. Use context of the API to specify the overall timeout. + // + // A HeaderTimeoutPerRequest of zero means no timeout. + HeaderTimeoutPerRequest time.Duration + + // SelectionMode is an EndpointSelectionMode enum that specifies the + // policy for choosing the etcd cluster node to which requests are sent. 
+ SelectionMode EndpointSelectionMode +} + +func (cfg *Config) transport() CancelableTransport { + if cfg.Transport == nil { + return DefaultTransport + } + return cfg.Transport +} + +func (cfg *Config) checkRedirect() CheckRedirectFunc { + if cfg.CheckRedirect == nil { + return DefaultCheckRedirect + } + return cfg.CheckRedirect +} + +// CancelableTransport mimics net/http.Transport, but requires that +// the object also support request cancellation. +type CancelableTransport interface { + http.RoundTripper + CancelRequest(req *http.Request) +} + +type CheckRedirectFunc func(via int) error + +// DefaultCheckRedirect follows up to 10 redirects, but no more. +var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { + if via > 10 { + return ErrTooManyRedirects + } + return nil +} + +type Client interface { + // Sync updates the internal cache of the etcd cluster's membership. + Sync(context.Context) error + + // AutoSync periodically calls Sync() every given interval. + // The recommended sync interval is 10 seconds to 1 minute, which does + // not bring too much overhead to server and makes client catch up the + // cluster change in time. + // + // The example to use it: + // + // for { + // err := client.AutoSync(ctx, 10*time.Second) + // if err == context.DeadlineExceeded || err == context.Canceled { + // break + // } + // log.Print(err) + // } + AutoSync(context.Context, time.Duration) error + + // Endpoints returns a copy of the current set of API endpoints used + // by Client to resolve HTTP requests. If Sync has ever been called, + // this may differ from the initial Endpoints provided in the Config. + Endpoints() []string + + // SetEndpoints sets the set of API endpoints used by Client to resolve + // HTTP requests. If the given endpoints are not valid, an error will be + // returned + SetEndpoints(eps []string) error + + // GetVersion retrieves the current etcd server and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + + httpClient +} + +func New(cfg Config) (Client, error) { + c := &httpClusterClient{ + clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), + rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + selectionMode: cfg.SelectionMode, + } + if cfg.Username != "" { + c.credentials = &credentials{ + username: cfg.Username, + password: cfg.Password, + } + } + if err := c.SetEndpoints(cfg.Endpoints); err != nil { + return nil, err + } + return c, nil +} + +type httpClient interface { + Do(context.Context, httpAction) (*http.Response, []byte, error) +} + +func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { + return func(ep url.URL) httpClient { + return &redirectFollowingHTTPClient{ + checkRedirect: cr, + client: &simpleHTTPClient{ + transport: tr, + endpoint: ep, + headerTimeout: headerTimeout, + }, + } + } +} + +type credentials struct { + username string + password string +} + +type httpClientFactory func(url.URL) httpClient + +type httpAction interface { + HTTPRequest(url.URL) *http.Request +} + +type httpClusterClient struct { + clientFactory httpClientFactory + endpoints []url.URL + pinned int + credentials *credentials + sync.RWMutex + rand *rand.Rand + selectionMode EndpointSelectionMode +} + +func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { + ceps := make([]url.URL, len(eps)) + copy(ceps, eps) + + // To perform a lookup on the new endpoint list 
without using the current + // client, we'll copy it + clientCopy := &httpClusterClient{ + clientFactory: c.clientFactory, + credentials: c.credentials, + rand: c.rand, + + pinned: 0, + endpoints: ceps, + } + + mAPI := NewMembersAPI(clientCopy) + leader, err := mAPI.Leader(ctx) + if err != nil { + return "", err + } + if len(leader.ClientURLs) == 0 { + return "", ErrNoLeaderEndpoint + } + + return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? +} + +func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { + if len(eps) == 0 { + return []url.URL{}, ErrNoEndpoints + } + + neps := make([]url.URL, len(eps)) + for i, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + return []url.URL{}, err + } + neps[i] = *u + } + return neps, nil +} + +func (c *httpClusterClient) SetEndpoints(eps []string) error { + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + c.endpoints = shuffleEndpoints(c.rand, neps) + // We're not doing anything for PrioritizeLeader here. This is + // due to not having a context meaning we can't call getLeaderEndpoint + // However, if you're using PrioritizeLeader, you've already been told + // to regularly call sync, where we do have a ctx, and can figure the + // leader. PrioritizeLeader is also quite a loose guarantee, so deal + // with it + c.pinned = 0 + + return nil +} + +func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + action := act + c.RLock() + leps := len(c.endpoints) + eps := make([]url.URL, leps) + n := copy(eps, c.endpoints) + pinned := c.pinned + + if c.credentials != nil { + action = &authedAction{ + act: act, + credentials: *c.credentials, + } + } + c.RUnlock() + + if leps == 0 { + return nil, nil, ErrNoEndpoints + } + + if leps != n { + return nil, nil, errors.New("unable to pick endpoint: copy failed") + } + + var resp *http.Response + var body []byte + var err error + cerr := &ClusterError{} + isOneShot := ctx.Value(&oneShotCtxValue) != nil + + for i := pinned; i < leps+pinned; i++ { + k := i % leps + hc := c.clientFactory(eps[k]) + resp, body, err = hc.Do(ctx, action) + if err != nil { + cerr.Errors = append(cerr.Errors, err) + if err == ctx.Err() { + return nil, nil, ctx.Err() + } + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + if isOneShot { + return nil, nil, err + } + continue + } + if resp.StatusCode/100 == 5 { + switch resp.StatusCode { + case http.StatusInternalServerError, http.StatusServiceUnavailable: + // TODO: make sure this is a no leader response + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) + default: + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) + } + if isOneShot { + return nil, nil, cerr.Errors[0] + } + continue + } + if k != pinned { + c.Lock() + c.pinned = k + c.Unlock() + } + return resp, body, nil + } + + return nil, nil, cerr +} + +func (c *httpClusterClient) Endpoints() []string { + c.RLock() + defer c.RUnlock() + + eps := make([]string, len(c.endpoints)) + for i, ep := range c.endpoints { + eps[i] = ep.String() + } + + return eps +} + +func (c *httpClusterClient) Sync(ctx context.Context) error { + mAPI := NewMembersAPI(c) + ms, err := mAPI.List(ctx) + if err != nil { + return err + } + + var eps []string + for _, m := range ms { + eps = append(eps, m.ClientURLs...) 
+ } + + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + npin := 0 + + switch c.selectionMode { + case EndpointSelectionRandom: + c.RLock() + eq := endpointsEqual(c.endpoints, neps) + c.RUnlock() + + if eq { + return nil + } + // When items in the endpoint list changes, we choose a new pin + neps = shuffleEndpoints(c.rand, neps) + case EndpointSelectionPrioritizeLeader: + nle, err := c.getLeaderEndpoint(ctx, neps) + if err != nil { + return ErrNoLeaderEndpoint + } + + for i, n := range neps { + if n.String() == nle { + npin = i + break + } + } + default: + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) + } + + c.Lock() + defer c.Unlock() + c.endpoints = neps + c.pinned = npin + + return nil +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + +type roundTripResponse struct { + resp *http.Response + err error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req != nil && req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var body []byte + done := make(chan 
struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("Location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + p := r.Perm(len(eps)) + neps := make([]url.URL, len(eps)) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} + +func endpointsEqual(left, right []url.URL) bool { + if len(left) != len(right) { + return false + } + + sLeft := make([]string, len(left)) + sRight := make([]string, len(right)) + for i, l := range left { + sLeft[i] = l.String() + } + for i, r := range right { + sRight[i] = r.String() + } + + sort.Strings(sLeft) + sort.Strings(sRight) + for i := range sLeft { + if sLeft[i] != sRight[i] { + return false + } + } + return true +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/cluster_error.go b/octopus/vendor/github.com/coreos/etcd/client/cluster_error.go new file mode 100644 index 0000000..34618cd --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import "fmt" + +type ClusterError struct { + Errors []error +} + +func (ce *ClusterError) Error() string { + s := ErrClusterUnavailable.Error() + for i, e := range ce.Errors { + s += fmt.Sprintf("; error #%d: %s\n", i, e) + } + return s +} + +func (ce *ClusterError) Detail() string { + s := "" + for i, e := range ce.Errors { + s += fmt.Sprintf("error #%d: %s\n", i, e) + } + return s +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/curl.go b/octopus/vendor/github.com/coreos/etcd/client/curl.go new file mode 100644 index 0000000..c8bc9fb --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/curl.go @@ -0,0 +1,70 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var ( + cURLDebug = false +) + +func EnablecURLDebug() { + cURLDebug = true +} + +func DisablecURLDebug() { + cURLDebug = false +} + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. +func printcURL(req *http.Request) error { + if !cURLDebug { + return nil + } + var ( + command string + b []byte + err error + ) + + if req.URL != nil { + command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) + } + + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return err + } + command += fmt.Sprintf(" -d %q", string(b)) + } + + fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + + // reset body + body := bytes.NewBuffer(b) + req.Body = ioutil.NopCloser(body) + + return nil +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/discover.go b/octopus/vendor/github.com/coreos/etcd/client/discover.go new file mode 100644 index 0000000..442e35f --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/discover.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "github.com/coreos/etcd/pkg/srv" +) + +// Discoverer is an interface that wraps the Discover method. +type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string) ([]string, error) +} + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/doc.go b/octopus/vendor/github.com/coreos/etcd/client/doc.go new file mode 100644 index 0000000..dd336d1 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/doc.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides bindings for the etcd APIs. + +Create a Config and exchange it for a Client: + + import ( + "net/http" + + "github.com/coreos/etcd/client" + "golang.org/x/net/context" + ) + + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: DefaultTransport, + } + + c, err := client.New(cfg) + if err != nil { + // handle error + } + +Clients are safe for concurrent use by multiple goroutines. + +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring its previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/octopus/vendor/github.com/coreos/etcd/client/integration/doc.go b/octopus/vendor/github.com/coreos/etcd/client/integration/doc.go new file mode 100644 index 0000000..e9c58d6 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/integration/doc.go @@ -0,0 +1,17 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package integration implements tests built upon embedded etcd, focusing on +// the correctness of the etcd v2 client. 
+package integration diff --git a/octopus/vendor/github.com/coreos/etcd/client/keys.generated.go b/octopus/vendor/github.com/coreos/etcd/client/keys.generated.go new file mode 100644 index 0000000..216139c --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -0,0 +1,1087 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package client + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81819 = 1 + codecSelferC_RAW1819 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1819 = 10 + codecSelferValueTypeMap1819 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1819 = 2 + codecSelfer_containerMapValue1819 = 3 + codecSelfer_containerMapEnd1819 = 4 + codecSelfer_containerArrayElem1819 = 6 + codecSelfer_containerArrayEnd1819 = 7 +) + +var ( + codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1819 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 time.Time + _ = v0 + } +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("action")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("node")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("prevNode")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1819) + } + } + } +} + +func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1819 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1819) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1819 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + } + } +} + +func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1819) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1819) + switch yys3 { + case "action": + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv4 := &x.Action + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "node": + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + case "prevNode": + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1819) +} + +func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv9 := &x.Action + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Dir != false + yyq2[6] = x.Expiration != nil + yyq2[7] = x.TTL != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("dir")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("nodes")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq2[6] { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { + r.EncodeBuiltin(yym23, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym22 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("expiration")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Expiration == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { + r.EncodeBuiltin(yym25, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym24 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("ttl")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1819) + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1819 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1819) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1819 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h 
codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1819) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1819) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "nodes": + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv10 := &x.Nodes + yyv10.CodecDecodeSelf(d) + } + case "createdIndex": + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv11 := &x.CreatedIndex + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + case "modifiedIndex": + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv13 := &x.ModifiedIndex + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + case "expiration": + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { + r.DecodeBuiltin(yym17, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym16 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + case "ttl": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv18 := &x.TTL + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int64)(yyv18)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1819) +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv21 := &x.Key + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv23 := &x.Dir + yym24 := z.DecBinary() + _ = yym24 + if false { + 
} else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv25 := &x.Value + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv27 := &x.Nodes + yyv27.CodecDecodeSelf(d) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv28 := &x.CreatedIndex + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv30 := &x.ModifiedIndex + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym33 := z.DecBinary() + _ = yym33 + if false { + } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { + r.DecodeBuiltin(yym34, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym33 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym33 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encNodes((Nodes)(x), e) + } + } +} + +func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decNodes((*Nodes)(x), d) + } +} + +func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*Node, yyrl1) + } + } else { + yyv1 = make([]*Node, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *Node + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*Node{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/keys.go b/octopus/vendor/github.com/coreos/etcd/client/keys.go new file mode 100644 index 0000000..4a6c41a --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/keys.go @@ -0,0 +1,682 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/pkg/pathutil" + "github.com/ugorji/go/codec" + "golang.org/x/net/context" +) + +const ( + ErrorCodeKeyNotFound = 100 + ErrorCodeTestFailed = 101 + ErrorCodeNotFile = 102 + ErrorCodeNotDir = 104 + ErrorCodeNodeExist = 105 + ErrorCodeRootROnly = 107 + ErrorCodeDirNotEmpty = 108 + ErrorCodeUnauthorized = 110 + + ErrorCodePrevValueRequired = 201 + ErrorCodeTTLNaN = 202 + ErrorCodeIndexNaN = 203 + ErrorCodeInvalidField = 209 + ErrorCodeInvalidForm = 210 + + ErrorCodeRaftInternal = 300 + ErrorCodeLeaderElect = 301 + + ErrorCodeWatcherCleared = 400 + ErrorCodeEventIndexCleared = 401 +) + +type Error struct { + Code int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause"` + Index uint64 `json:"index"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) +} + +var ( + ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") + ErrEmptyBody = errors.New("client: response body is empty") +) + +// PrevExistType is used to define an existence condition when setting +// or deleting Nodes. +type PrevExistType string + +const ( + PrevIgnore = PrevExistType("") + PrevExist = PrevExistType("true") + PrevNoExist = PrevExistType("false") +) + +var ( + defaultV2KeysPrefix = "/v2/keys" +) + +// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value +// API over HTTP. +func NewKeysAPI(c Client) KeysAPI { + return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) +} + +// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller +// to provide a custom base URL path. This should only be used in +// very rare cases. +func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { + return &httpKeysAPI{ + client: c, + prefix: p, + } +} + +type KeysAPI interface { + // Get retrieves a set of Nodes from etcd + Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) + + // Set assigns a new value to a Node identified by a given key. The caller + // may define a set of conditions in the SetOptions. If SetOptions.Dir=true + // then value is ignored. + Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) + + // Delete removes a Node identified by the given key, optionally destroying + // all of its children as well. The caller may define a set of required + // conditions in an DeleteOptions object. + Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) + + // Create is an alias for Set w/ PrevExist=false + Create(ctx context.Context, key, value string) (*Response, error) + + // CreateInOrder is used to atomically create in-order keys within the given directory. + CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) + + // Update is an alias for Set w/ PrevExist=true + Update(ctx context.Context, key, value string) (*Response, error) + + // Watcher builds a new Watcher targeted at a specific Node identified + // by the given key. The Watcher may be configured at creation time + // through a WatcherOptions object. 
The returned Watcher is designed + // to emit events that happen to a Node, and optionally to its children. + Watcher(key string, opts *WatcherOptions) Watcher +} + +type WatcherOptions struct { + // AfterIndex defines the index after-which the Watcher should + // start emitting events. For example, if a value of 5 is + // provided, the first event will have an index >= 6. + // + // Setting AfterIndex to 0 (default) means that the Watcher + // should start watching for events starting at the current + // index, whatever that may be. + AfterIndex uint64 + + // Recursive specifies whether or not the Watcher should emit + // events that occur in children of the given keyspace. If set + // to false (default), events will be limited to those that + // occur for the exact key. + Recursive bool +} + +type CreateInOrderOptions struct { + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration +} + +type SetOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Set operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. + // + // PrevValue is ignored if Dir=true + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Set operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // PrevExist specifies whether the Node must currently exist + // (PrevExist) or not (PrevNoExist). If the caller does not + // care about existence, set PrevExist to PrevIgnore, or simply + // leave it unset. + PrevExist PrevExistType + + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration + + // Refresh set to true means a TTL value can be updated + // without firing a watch or changing the node value. A + // value must not be provided when refreshing a key. + Refresh bool + + // Dir specifies whether or not this Node should be created as a directory. + Dir bool + + // NoValueOnSuccess specifies whether the response contains the current value of the Node. + // If set, the response will only contain the current value when the request fails. + NoValueOnSuccess bool +} + +type GetOptions struct { + // Recursive defines whether or not all children of the Node + // should be returned. + Recursive bool + + // Sort instructs the server whether or not to sort the Nodes. + // If true, the Nodes are sorted alphabetically by key in + // ascending order (A to z). If false (default), the Nodes will + // not be sorted and the ordering used should not be considered + // predictable. + Sort bool + + // Quorum specifies whether it gets the latest committed value that + // has been applied in quorum of members, which ensures external + // consistency (or linearizability). + Quorum bool +} + +type DeleteOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Delete operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. 
This cannot be used + // to compare the Node's current value to an empty string. + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Delete operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // Recursive defines whether or not all children of the Node + // should be deleted. If set to true, all children of the Node + // identified by the given key will be deleted. If left unset + // or explicitly set to false, only a single Node will be + // deleted. + Recursive bool + + // Dir specifies whether or not this Node should be removed as a directory. + Dir bool +} + +type Watcher interface { + // Next blocks until an etcd event occurs, then returns a Response + // representing that event. The behavior of Next depends on the + // WatcherOptions used to construct the Watcher. Next is designed to + // be called repeatedly, each time blocking until a subsequent event + // is available. + // + // If the provided context is cancelled, Next will return a non-nil + // error. Any other failures encountered while waiting for the next + // event (connection issues, deserialization failures, etc) will + // also result in a non-nil error. + Next(context.Context) (*Response, error) +} + +type Response struct { + // Action is the name of the operation that occurred. Possible values + // include get, set, delete, update, create, compareAndSwap, + // compareAndDelete and expire. + Action string `json:"action"` + + // Node represents the state of the relevant etcd Node. + Node *Node `json:"node"` + + // PrevNode represents the previous state of the Node. PrevNode is non-nil + // only if the Node existed before the action occurred and the action + // caused a change to the Node. + PrevNode *Node `json:"prevNode"` + + // Index holds the cluster-level index at the time the Response was generated. + // This index is not tied to the Node(s) contained in this Response. + Index uint64 `json:"-"` + + // ClusterID holds the cluster-level ID reported by the server. This + // should be different for different etcd clusters. + ClusterID string `json:"-"` +} + +type Node struct { + // Key represents the unique location of this Node (e.g. "/foo/bar"). + Key string `json:"key"` + + // Dir reports whether node describes a directory. + Dir bool `json:"dir,omitempty"` + + // Value is the current data stored on this Node. If this Node + // is a directory, Value will be empty. + Value string `json:"value"` + + // Nodes holds the children of this Node, only if this Node is a directory. + // This slice of will be arbitrarily deep (children, grandchildren, great- + // grandchildren, etc.) if a recursive Get or Watch request were made. + Nodes Nodes `json:"nodes"` + + // CreatedIndex is the etcd index at-which this Node was created. + CreatedIndex uint64 `json:"createdIndex"` + + // ModifiedIndex is the etcd index at-which this Node was last modified. + ModifiedIndex uint64 `json:"modifiedIndex"` + + // Expiration is the server side expiration time of the key. + Expiration *time.Time `json:"expiration,omitempty"` + + // TTL is the time to live of the key in second. 
+ TTL int64 `json:"ttl,omitempty"` +} + +func (n *Node) String() string { + return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) +} + +// TTLDuration returns the Node's TTL as a time.Duration object +func (n *Node) TTLDuration() time.Duration { + return time.Duration(n.TTL) * time.Second +} + +type Nodes []*Node + +// interfaces for sorting + +func (ns Nodes) Len() int { return len(ns) } +func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } +func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } + +type httpKeysAPI struct { + client httpClient + prefix string +} + +func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { + act := &setAction{ + Prefix: k.prefix, + Key: key, + Value: val, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.PrevExist = opts.PrevExist + act.TTL = opts.TTL + act.Refresh = opts.Refresh + act.Dir = opts.Dir + act.NoValueOnSuccess = opts.NoValueOnSuccess + } + + doCtx := ctx + if act.PrevExist == PrevNoExist { + doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) + } + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) +} + +func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { + act := &createInOrderAction{ + Prefix: k.prefix, + Dir: dir, + Value: val, + } + + if opts != nil { + act.TTL = opts.TTL + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) +} + +func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { + act := &deleteAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.Dir = opts.Dir + act.Recursive = opts.Recursive + } + + doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { + act := &getAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + act.Sorted = opts.Sort + act.Quorum = opts.Quorum + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { + act := waitAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + if opts.AfterIndex > 0 { + act.WaitIndex = opts.AfterIndex + 1 + } + } + + return &httpWatcher{ + client: k.client, + nextWait: act, + } +} + +type httpWatcher struct { + client httpClient + nextWait waitAction +} + +func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { + for { + 
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) + if err != nil { + return nil, err + } + + resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) + if err != nil { + if err == ErrEmptyBody { + continue + } + return nil, err + } + + hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 + return resp, nil + } +} + +// v2KeysURL forms a URL representing the location of a key. +// The endpoint argument represents the base URL of an etcd +// server. The prefix is the path needed to route from the +// provided endpoint's path to the root of the keys API +// (typically "/v2/keys"). +func v2KeysURL(ep url.URL, prefix, key string) *url.URL { + // We concatenate all parts together manually. We cannot use + // path.Join because it does not reserve trailing slash. + // We call CanonicalURLPath to further cleanup the path. + if prefix != "" && prefix[0] != '/' { + prefix = "/" + prefix + } + if key != "" && key[0] != '/' { + key = "/" + key + } + ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) + return &ep +} + +type getAction struct { + Prefix string + Key string + Recursive bool + Sorted bool + Quorum bool +} + +func (g *getAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, g.Prefix, g.Key) + + params := u.Query() + params.Set("recursive", strconv.FormatBool(g.Recursive)) + params.Set("sorted", strconv.FormatBool(g.Sorted)) + params.Set("quorum", strconv.FormatBool(g.Quorum)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type waitAction struct { + Prefix string + Key string + WaitIndex uint64 + Recursive bool +} + +func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, w.Prefix, w.Key) + + params := u.Query() + params.Set("wait", "true") + params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) + params.Set("recursive", strconv.FormatBool(w.Recursive)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type setAction struct { + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Refresh bool + Dir bool + NoValueOnSuccess bool +} + +func (a *setAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + form := url.Values{} + + // we're either creating a directory or setting a key + if a.Dir { + params.Set("dir", strconv.FormatBool(a.Dir)) + } else { + // These options are only valid for setting a key + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + form.Add("value", a.Value) + } + + // Options which apply to both setting a key and creating a dir + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.PrevExist != PrevIgnore { + params.Set("prevExist", string(a.PrevExist)) + } + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + + if a.Refresh { + form.Add("refresh", "true") + } + if a.NoValueOnSuccess { + params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) + } + + u.RawQuery = params.Encode() + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("PUT", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type deleteAction struct { + Prefix string + Key string + PrevValue string + PrevIndex uint64 + Dir bool + Recursive bool +} + +func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.Dir { + params.Set("dir", "true") + } + if a.Recursive { + params.Set("recursive", "true") + } + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("DELETE", u.String(), nil) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type createInOrderAction struct { + Prefix string + Dir string + Value string + TTL time.Duration +} + +func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Dir) + + form := url.Values{} + form.Add("value", a.Value) + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("POST", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req +} + +func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { + switch code { + case http.StatusOK, http.StatusCreated: + if len(body) == 0 { + return nil, ErrEmptyBody + } + res, err = unmarshalSuccessfulKeysResponse(header, body) + default: + err = unmarshalFailedKeysResponse(body) + } + + return +} + +func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { + var res Response + err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) + if err != nil { + return nil, ErrInvalidJSON + } + if header.Get("X-Etcd-Index") != "" { + res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) + if err != nil { + return nil, err + } + } + res.ClusterID = header.Get("X-Etcd-Cluster-ID") + return &res, nil +} + +func unmarshalFailedKeysResponse(body []byte) error { + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return ErrInvalidJSON + } + return etcdErr +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/members.go b/octopus/vendor/github.com/coreos/etcd/client/members.go new file mode 100644 index 0000000..2054895 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/members.go @@ -0,0 +1,304 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + + "golang.org/x/net/context" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + defaultV2MembersPrefix = "/v2/members" + defaultLeaderSuffix = "/leader" +) + +type Member struct { + // ID is the unique identifier of this Member. + ID string `json:"id"` + + // Name is a human-readable, non-unique identifier of this Member. + Name string `json:"name"` + + // PeerURLs represents the HTTP(S) endpoints this Member uses to + // participate in etcd's consensus protocol. 
+ PeerURLs []string `json:"peerURLs"` + + // ClientURLs represents the HTTP(S) endpoints on which this Member + // serves its client-facing APIs. + ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} + +type memberCreateOrUpdateRequest struct { + PeerURLs types.URLs +} + +func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{ + PeerURLs: make([]string, len(m.PeerURLs)), + } + + for i, u := range m.PeerURLs { + s.PeerURLs[i] = u.String() + } + + return json.Marshal(&s) +} + +// NewMembersAPI constructs a new MembersAPI that uses HTTP to +// interact with etcd's membership API. +func NewMembersAPI(c Client) MembersAPI { + return &httpMembersAPI{ + client: c, + } +} + +type MembersAPI interface { + // List enumerates the current cluster membership. + List(ctx context.Context) ([]Member, error) + + // Add instructs etcd to accept a new Member into the cluster. + Add(ctx context.Context, peerURL string) (*Member, error) + + // Remove demotes an existing Member out of the cluster. + Remove(ctx context.Context, mID string) error + + // Update instructs etcd to update an existing Member in the cluster. + Update(ctx context.Context, mID string, peerURLs []string) error + + // Leader gets current leader of the cluster + Leader(ctx context.Context) (*Member, error) +} + +type httpMembersAPI struct { + client httpClient +} + +func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { + req := &membersAPIActionList{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var mCollection memberCollection + if err := json.Unmarshal(body, &mCollection); err != nil { + return nil, err + } + + return []Member(mCollection), nil +} + +func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { + urls, err := types.NewURLs([]string{peerURL}) + if err != nil { + return nil, err + } + + req := &membersAPIActionAdd{peerURLs: urls} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return nil, err + } + return nil, merr + } + + var memb Member + if err := json.Unmarshal(body, &memb); err != nil { + return nil, err + } + + return &memb, nil +} + +func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { + urls, err := types.NewURLs(peerURLs) + if err != nil { + return err + } + + req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return err + } + return merr + } + + return nil +} + +func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { + req := &membersAPIActionRemove{memberID: memberID} + resp, _, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) +} + +func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { + req := &membersAPIActionLeader{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var leader Member + if err := json.Unmarshal(body, &leader); err != nil { + return nil, err + } + + return &leader, nil +} + +type membersAPIActionList struct{} + +func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type membersAPIActionRemove struct { + memberID string +} + +func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, d.memberID) + req, _ := http.NewRequest("DELETE", u.String(), nil) + return req +} + +type membersAPIActionAdd struct { + peerURLs types.URLs +} + +func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +type membersAPIActionUpdate struct { + memberID string + peerURLs types.URLs +} + +func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + u.Path = path.Join(u.Path, a.memberID) + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +func assertStatusCode(got int, want ...int) (err error) { + for _, w := range want { + if w == got { + return nil + } + } + return fmt.Errorf("unexpected status code %d", got) +} + +type membersAPIActionLeader struct{} + +func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, defaultLeaderSuffix) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +// v2MembersURL add the necessary path to the provided endpoint +// to route requests to the default v2 members API. +func v2MembersURL(ep url.URL) *url.URL { + ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) + return &ep +} + +type membersError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e membersError) Error() string { + return e.Message +} diff --git a/octopus/vendor/github.com/coreos/etcd/client/util.go b/octopus/vendor/github.com/coreos/etcd/client/util.go new file mode 100644 index 0000000..15a8bab --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/client/util.go @@ -0,0 +1,53 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "regexp" +) + +var ( + roleNotFoundRegExp *regexp.Regexp + userNotFoundRegExp *regexp.Regexp +) + +func init() { + roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") + userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") +} + +// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. +func IsKeyNotFound(err error) bool { + if cErr, ok := err.(Error); ok { + return cErr.Code == ErrorCodeKeyNotFound + } + return false +} + +// IsRoleNotFound returns true if the error means role not found of v2 API. +func IsRoleNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return roleNotFoundRegExp.MatchString(ae.Message) + } + return false +} + +// IsUserNotFound returns true if the error means user not found of v2 API. +func IsUserNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return userNotFoundRegExp.MatchString(ae.Message) + } + return false +} diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE b/octopus/vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/pathutil/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/octopus/vendor/github.com/coreos/etcd/pkg/pathutil/path.go new file mode 100644 index 0000000..f26254b --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/pathutil/path.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pathutil implements utility functions for handling slash-separated +// paths. +package pathutil + +import "path" + +// CanonicalURLPath returns the canonical url path for p, which follows the rules: +// 1. the path always starts with "/" +// 2. replace multiple slashes with a single slash +// 3. replace each '.' '..' path name element with equivalent one +// 4. keep the trailing slash +// The function is borrowed from stdlib http.cleanPath in server.go. 
+func CanonicalURLPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root, + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/srv/LICENSE b/octopus/vendor/github.com/coreos/etcd/pkg/srv/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/srv/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/octopus/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 0000000..fefcbcb --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,140 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. +func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. 
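+			// e.g. an SRV target of "infra0.example.com." becomes the URL host "infra0.example.com".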
+ shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/types/LICENSE b/octopus/vendor/github.com/coreos/etcd/pkg/types/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/types/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/types/doc.go b/octopus/vendor/github.com/coreos/etcd/pkg/types/doc.go new file mode 100644 index 0000000..de8ef0b --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types declares various data types and implements type-checking +// functions. 
+package types diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/types/id.go b/octopus/vendor/github.com/coreos/etcd/pkg/types/id.go new file mode 100644 index 0000000..1b042d9 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -0,0 +1,41 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" +) + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/types/set.go b/octopus/vendor/github.com/coreos/etcd/pkg/types/set.go new file mode 100644 index 0000000..73ef431 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/types/set.go @@ -0,0 +1,178 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) 
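+	// wrap the unsafe set in a tsafeSet so every operation below is guarded by the RWMutex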
+ return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/types/slice.go b/octopus/vendor/github.com/coreos/etcd/pkg/types/slice.go new file mode 100644 index 0000000..0dd9ca7 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/types/urls.go b/octopus/vendor/github.com/coreos/etcd/pkg/types/urls.go new file mode 100644 index 0000000..9e5d03f --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/octopus/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/octopus/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go new file mode 100644 index 0000000..47690cc --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/octopus/vendor/github.com/coreos/etcd/version/LICENSE b/octopus/vendor/github.com/coreos/etcd/version/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/version/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/octopus/vendor/github.com/coreos/etcd/version/version.go b/octopus/vendor/github.com/coreos/etcd/version/version.go new file mode 100644 index 0000000..9134ceb --- /dev/null +++ b/octopus/vendor/github.com/coreos/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. +package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.2.0+git" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/octopus/vendor/github.com/coreos/go-semver/semver/LICENSE b/octopus/vendor/github.com/coreos/go-semver/semver/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/octopus/vendor/github.com/coreos/go-semver/semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/octopus/vendor/github.com/coreos/go-semver/semver/semver.go b/octopus/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 0000000..76cf485 --- /dev/null +++ b/octopus/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. 
+func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. + if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/octopus/vendor/github.com/coreos/go-semver/semver/sort.go b/octopus/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 0000000..e256b41 --- /dev/null +++ b/octopus/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/LICENSE.md b/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/LICENSE.md new file mode 100644 index 0000000..1cade6c --- /dev/null +++ b/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
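The go-semver package vendored above implements SemVer precedence as spelled out in its comments: versions compare numerically on major.minor.patch, a pre-release sorts below the corresponding release, and numeric pre-release identifiers rank below alphanumeric ones. The following is a minimal usage sketch against the API defined in semver.go and sort.go above; it is illustrative only and not part of this patch, and it assumes the import path implied by the vendor directory.

    // Illustrative only -- not part of the patch. Exercises the vendored
    // go-semver API (NewVersion, New, Compare, LessThan, Sort, BumpMinor)
    // exactly as declared in the files added above.
    package main

    import (
    	"fmt"

    	"github.com/coreos/go-semver/semver"
    )

    func main() {
    	// NewVersion reports parse errors; New panics on invalid input (via Must).
    	a, err := semver.NewVersion("1.2.3-rc.1+build.5")
    	if err != nil {
    		panic(err)
    	}
    	b := semver.New("1.2.3")

    	// A pre-release sorts below the corresponding release.
    	fmt.Println(a.LessThan(*b), a.Compare(*b)) // true -1

    	// Sort orders a slice of *Version by ascending precedence.
    	vs := []*semver.Version{b, a, semver.New("0.9.0")}
    	semver.Sort(vs)
    	for _, v := range vs {
    		fmt.Println(v) // 0.9.0, then 1.2.3-rc.1+build.5, then 1.2.3
    	}

    	// Bump helpers reset the lower-order fields.
    	c := semver.New("1.4.7-alpha")
    	c.BumpMinor()
    	fmt.Println(c) // 1.5.0
    }

Build metadata ("+build.5") is carried through String() but, per the spec and the Compare implementation above, never affects precedence.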
diff --git a/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go new file mode 100644 index 0000000..8f44fa1 --- /dev/null +++ b/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go @@ -0,0 +1,19 @@ +package md2man + +import ( + "github.com/russross/blackfriday" +) + +func Render(doc []byte) []byte { + renderer := RoffRenderer(0) + extensions := 0 + extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS + extensions |= blackfriday.EXTENSION_TABLES + extensions |= blackfriday.EXTENSION_FENCED_CODE + extensions |= blackfriday.EXTENSION_AUTOLINK + extensions |= blackfriday.EXTENSION_SPACE_HEADERS + extensions |= blackfriday.EXTENSION_FOOTNOTES + extensions |= blackfriday.EXTENSION_TITLEBLOCK + + return blackfriday.Markdown(doc, renderer, extensions) +} diff --git a/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go new file mode 100644 index 0000000..292fca3 --- /dev/null +++ b/octopus/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go @@ -0,0 +1,301 @@ +package md2man + +import ( + "bytes" + "fmt" + "html" + "strings" + + "github.com/russross/blackfriday" +) + +type roffRenderer struct{} + +var listCounter int + +func RoffRenderer(flags int) blackfriday.Renderer { + return &roffRenderer{} +} + +func (r *roffRenderer) GetFlags() int { + return 0 +} + +func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { + out.WriteString(".TH ") + + splitText := bytes.Split(text, []byte("\n")) + for i, line := range splitText { + line = bytes.TrimPrefix(line, []byte("% ")) + if i == 0 { + line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) + line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) + } + line = append([]byte("\""), line...) + line = append(line, []byte("\" ")...) 
+ out.Write(line) + } + out.WriteString("\n") + + // disable hyphenation + out.WriteString(".nh\n") + // disable justification (adjust text to left margin only) + out.WriteString(".ad l\n") +} + +func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { + out.WriteString("\n.PP\n.RS\n\n.nf\n") + escapeSpecialChars(out, text) + out.WriteString("\n.fi\n.RE\n") +} + +func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { + out.WriteString("\n.PP\n.RS\n") + out.Write(text) + out.WriteString("\n.RE\n") +} + +func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { + out.Write(text) +} + +func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { + marker := out.Len() + + switch { + case marker == 0: + // This is the doc header + out.WriteString(".TH ") + case level == 1: + out.WriteString("\n\n.SH ") + case level == 2: + out.WriteString("\n.SH ") + default: + out.WriteString("\n.SS ") + } + + if !text() { + out.Truncate(marker) + return + } +} + +func (r *roffRenderer) HRule(out *bytes.Buffer) { + out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n") +} + +func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { + marker := out.Len() + if flags&blackfriday.LIST_TYPE_ORDERED != 0 { + listCounter = 1 + } + if !text() { + out.Truncate(marker) + return + } +} + +func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { + if flags&blackfriday.LIST_TYPE_ORDERED != 0 { + out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", listCounter)) + listCounter += 1 + } else { + out.WriteString(".IP \\(bu 2\n") + } + out.Write(text) + out.WriteString("\n") +} + +func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { + marker := out.Len() + out.WriteString("\n.PP\n") + if !text() { + out.Truncate(marker) + return + } + if marker != 0 { + out.WriteString("\n") + } +} + +func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { + out.WriteString("\n.TS\nallbox;\n") + + max_delims := 0 + lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n") + for _, w := range lines { + cur_delims := strings.Count(w, "\t") + if cur_delims > max_delims { + max_delims = cur_delims + } + } + out.Write([]byte(strings.Repeat("l ", max_delims+1) + "\n")) + out.Write([]byte(strings.Repeat("l ", max_delims+1) + ".\n")) + out.Write(header) + if len(header) > 0 { + out.Write([]byte("\n")) + } + + out.Write(body) + out.WriteString("\n.TE\n") +} + +func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { + if out.Len() > 0 { + out.WriteString("\n") + } + out.Write(text) +} + +func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) == 0 { + text = []byte{' '} + } + out.Write([]byte("\\fB\\fC" + string(text) + "\\fR")) +} + +func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) > 30 { + text = append([]byte("T{\n"), text...) + text = append(text, []byte("\nT}")...) 
+ } + if len(text) == 0 { + text = []byte{' '} + } + out.Write(text) +} + +func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { + +} + +func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { + +} + +func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { + out.WriteString("\n\\[la]") + out.Write(link) + out.WriteString("\\[ra]") +} + +func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB\\fC") + escapeSpecialChars(out, text) + out.WriteString("\\fR") +} + +func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fI") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { +} + +func (r *roffRenderer) LineBreak(out *bytes.Buffer) { + out.WriteString("\n.br\n") +} + +func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { + out.Write(content) + r.AutoLink(out, link, 0) +} + +func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { + out.Write(tag) +} + +func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\s+2") + out.Write(text) + out.WriteString("\\s-2") +} + +func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { +} + +func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { + +} + +func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { + out.WriteString(html.UnescapeString(string(entity))) +} + +func processFooterText(text []byte) []byte { + text = bytes.TrimPrefix(text, []byte("% ")) + newText := []byte{} + textArr := strings.Split(string(text), ") ") + + for i, w := range textArr { + if i == 0 { + w = strings.Replace(w, "(", "\" \"", 1) + w = fmt.Sprintf("\"%s\"", w) + } else { + w = fmt.Sprintf(" \"%s\"", w) + } + newText = append(newText, []byte(w)...) + } + newText = append(newText, []byte(" \"\"")...) 
+ + return newText +} + +func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { + escapeSpecialChars(out, text) +} + +func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { +} + +func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(out *bytes.Buffer, text []byte) { + for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out.WriteString("\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + out.Write(text[org:i]) + } + + // escape a character + if i >= len(text) { + break + } + out.WriteByte('\\') + out.WriteByte(text[i]) + } +} diff --git a/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/LICENSE.md b/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/LICENSE.md new file mode 100644 index 0000000..1cade6c --- /dev/null +++ b/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/block.go b/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/block.go new file mode 100644 index 0000000..0b5510d --- /dev/null +++ b/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/block.go @@ -0,0 +1,1424 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + + "github.com/shurcooL/sanitized_anchor_name" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. 
+func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
+		//   <div>
+		//       ...
+		//   </div>
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = sanitized_anchor_name.Create(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // check for HTML CDATA + if size := p.htmlCDATA(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { + // html block needs to end with a blank line + if i := p.isEmpty(data[start:]); i > 0 { + size := start + i + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HTML comment, lax form +func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHTMLComment(out, data) + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HTML CDATA section +func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { + const cdataTag = "') { + i++ + } + i++ + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + return p.renderHTMLBlock(out, data, i+1, doRender) + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (*parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If syntax is not nil, it gets set to the syntax specified in the fence line. +// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. +func isFenceLine(data []byte, syntax *string, oldmarker string, newlineOptional bool) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the syntax, and discard it if the caller doesn't care. 
+ if syntax != nil { + syn := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + syntaxStart := i + + if data[i] == '{' { + i++ + syntaxStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + syn++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for syn > 0 && isspace(data[syntaxStart]) { + syntaxStart++ + syn-- + } + + for syn > 0 && isspace(data[syntaxStart+syn-1]) { + syn-- + } + + i++ + } else { + for i < len(data) && !isspace(data[i]) { + syn++ + i++ + } + } + + *syntax = string(data[syntaxStart : syntaxStart+syn]) + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var syntax string + beg, marker := isFenceLine(data, &syntax, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), syntax) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for data[end] != '\n' { + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffeu + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + containsBlankLine = false + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = sanitized_anchor_name.Create(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} diff --git a/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/html.go b/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 0000000..74e67ee --- /dev/null +++ b/octopus/vendor/github.com/cpuguy83/go-md2man/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,949 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Html renderer configuration options. +const ( + HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks + HTML_SKIP_STYLE // skip embedded