diff --git a/.gitignore b/.gitignore
index 4c4a2fb6..b13b2f6e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,6 +26,7 @@ var/
 *.egg-info/
 .installed.cfg
 *.egg
+bin/
 
 # PyInstaller
 # Usually these files are written by a python script from a template
@@ -33,6 +34,9 @@ var/
 *.manifest
 *.spec
 
+# Don't ignore atomicapp.spec for PyInstaller builds
+!atomicapp.spec
+
 # Installer logs
 pip-log.txt
 pip-delete-this-directory.txt
@@ -66,3 +70,5 @@ tags
 TAGS
 *.swp
 *~
+*.swo
+*.swn
diff --git a/.travis.yml b/.travis.yml
index 96f15f0c..f654faf9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,7 @@
 # Travis CI checks for atomicapp
 
 language: python
+sudo: required
 python:
   - "2.7"
 
@@ -9,18 +10,16 @@ notifications:
   irc: "chat.freenode.net#nulecule"
 
 before_install:
-  - pip install pytest-cov coveralls --use-mirrors
-  - pip install pep8 --use-mirrors
-  - pip install flake8 --use-mirrors
+  - sudo pip install pytest-cov coveralls pep8 flake8
 
 install:
-  - make install
+  - sudo make install
 
 before_script:
-  - make syntax-check
+  - sudo make syntax-check
 
 script:
-  - make test
+  - sudo make test
 
 after_success:
-  - coveralls
+  - sudo coveralls
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d0e6760c..03866dec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,707 @@
+## Atomic App 0.6.4 (10-06-2016)
+
+This release of Atomic App includes a large refactor of the "config" class as well as a minor change to our release script.
+
+Features:
+ - Config refactor
+ - Release script fix
+ - Test cases added to the config class
+
+```
+Charlie Drage (1):
+      Update release script regex
+
+Ratnadeep Debnath (3):
+      Initial work on refactoring Nulecule config. #524
+      Re implememt Config class to be more generic. Fixes #524
+      Fixed tests for config refactor.
+```
+
+## Atomic App 0.6.3 (08-31-2016)
+
+This release focuses on bug fixes and scripts, as well as the merge of the Nulecule specification into Atomic App.
+
+Features:
+ - The Nulecule spec is now located within the Atomic App repo / project
+ - Bug fixes with the `atomicapp genanswers` command
+ - Release script added
+
+```
+Charlie Drage (7):
+      Remove lifecycle for updated CLI doc
+      Merge Nulecule specification into Atomic App
+      Formatting error with anymarkup and genanswers
+      Openshift to KubeShift conversion and improvements
+      Add release script for Atomic App
+      Fix typo in openshift.py provider
+      Fixes mode argument with --mode=genanswers
+
+Ratnadeep Debnath (1):
+      Use travis CI to run tests from tests/units dir.
+```
+
+## Atomic App 0.6.2 (07-27-2016)
+
+This release of Atomic App introduces the new `atomicapp index` command.
+
+We add this command to give a quick overview of all available featured and tested Nuleculized applications on [github.com/projectatomic/nulecule-library](https://github.com/projectatomic/nulecule-library). The ability to generate your own list is available as well with the `atomicapp index generate` command.
+
+The main features of this release are:
+
+* Addition of the `atomicapp index` command
+* Correct file permissions are now set when extracting Nuleculized containers
+* OpenShift connection issue bugfix
+
+
+## `atomicapp index`
+
+This release adds the `atomicapp index` command. By using the `atomicapp index list` command, Atomic App will retrieve a container containing a valid `index.yml` and output all available Nulecule containers. This index can also be updated by using `atomicapp index update`.
+
+
+**atomicapp index list**
+
+Outputs the list of available containers located at `~/.atomicapp/index.yml`.
+
+```
+▶ atomicapp index list
+INFO   :: Atomic App: 0.6.2 - Mode: Index
+ID                        VER     PROVIDERS  LOCATION
+postgresql-atomicapp      1.0.0   {D,O,K}    docker.io/projectatomic/postgresql-centos7-atomicapp
+flask_redis_nulecule      0.0.1   {D,K}      docker.io/projectatomic/flask-redis-centos7-atomicapp
+redis-atomicapp           0.0.1   {D,O,K}    docker.io/projectatomic/redis-centos7-atomicapp
+gocounter                 0.0.1   {D,K}      docker.io/projectatomic/gocounter-scratch-atomicapp
+mariadb-atomicapp         1.0.0   {D,O,K}    docker.io/projectatomic/mariadb-centos7-atomicapp
+helloapache-app           0.0.1   {D,K,M}    docker.io/projectatomic/helloapache
+mongodb-atomicapp         1.0.0   {D,O,K}    docker.io/projectatomic/mongodb-centos7-atomicapp
+etherpad-app              0.0.1   {D,O,K}    docker.io/projectatomic/etherpad-centos7-atomicapp
+apache-centos7-atomicapp  0.0.1   {D,K,M}    docker.io/projectatomic/apache-centos7-atomicapp
+wordpress-atomicapp       2.0.0   {D,O,K}    docker.io/projectatomic/wordpress-centos7-atomicapp
+skydns-atomicapp          0.0.1   {K}        docker.io/projectatomic/skydns-atomicapp
+guestbookgo-atomicapp     0.0.1   {O,K}      docker.io/projectatomic/guestbookgo-atomicapp
+mariadb-app               0.0.1   {D,K}      docker.io/projectatomic/mariadb-fedora-atomicapp
+gitlab-atomicapp          1.2.0   {D,K}      docker.io/projectatomic/gitlab-centos7-atomicapp
+```
+
+**atomicapp index update**
+
+Updates the `index.yml` file.
+
+```
+▶ atomicapp index update
+INFO   :: Atomic App: 0.6.2 - Mode: Index
+INFO   :: Updating the index list
+INFO   :: Pulling latest index image...
+INFO   :: Skipping pulling docker image: projectatomic/nulecule-library
+INFO   :: Copying files from image projectatomic/nulecule-library:/index.yaml to /home/wikus/.atomicapp/index.yaml
+INFO   :: Index updated
+```
+
+**atomicapp index generate**
+
+Generates a valid `index.yml` file to use in listing all available containers.
+
+```
+▶ atomicapp index generate ./nulecule-library
+INFO   :: Atomic App: 0.6.1 - Mode: Index
+INFO   :: Generating index.yaml from ./nulecule-library
+INFO   :: index.yaml generated
+```
+
+```
+Abhishek (3):
+      incorrect_log_level
+      Show help when no arguments given.
+      raise DockerException for docker commands
+
+Charlie Drage (7):
+      Remove warning in regards to application-entity
+      Refactor extracting
+      Fixes connecting issues with OpenShift
+      Fix coverage
+      Modifies asking for parameters to show app name
+      When fetching or extracting, set the correct uid + guid
+      Add "index" command to Atomic App
+
+Shubham Minglani (1):
+      collect only atomicapp tests
+```
+
+## Atomic App 0.6.1 (07-06-2016)
+
+A minor release for Atomic App.
+
+With this release, we merge a few bug fixes in relation to our Kubernetes provider as well as clean up documentation.
+
+Features:
+ - Fix inclusive rules issue with Kubernetes
+ - Clean up CLI parsing documentation / help command
+
+
+```
+Charlie Drage :
+      Fix link issue on CLI doc within start guide
+      Clean up parsing
+      False positive error out on docker pull
+
+Khris Richardson :
+      more inclusive rules for adding es
+```
+
+## Atomic App 0.6.0 (06-14-2016)
+
+In this major release of Atomic App, we incorporate major changes to the **kubernetes** provider. With this release we replace the usage of kubectl with the *requests* Python library and the Kubernetes HTTP API endpoint. This change results in faster deployments, smaller image sizes and more detailed error messages.
+
+The main features of this release are:
+ - Kubectl to API conversion for the Kubernetes provider
+ - Removal of ASCII art
+
+```
+Charlie Drage (3):
+      Ignore .swn vim temp files
+      Remove ASCII art
+      Add a new 'library' for k8s/openshift providers.
+
+Khris Richardson (2):
+      add support for api-groups
+      add precision to resource membership test
+```
+
+## Atomic App 0.5.2 (05-24-2016)
+
+In this release of Atomic App we include binary generation, a logging refactor/clean-up, as well as a minor code refactor of the main Nulecule codebase.
+
+The main features are:
+ - Add support for generating a static binary of Atomic App
+ - Logging clean-up
+ - README.md removed from `atomicapp init` generation
+ - Removal of dynamic module/class loading from providers in favour of static
+
+```
+Charlie Drage :
+      Ignore vim .swo files
+      Change provider-config warning to debug
+      Remove README.MD from init
+      Makes Makefile faster using .PHONY && add default python location
+      Modify TravisCI for updated Makefile
+      Removes loading via .py files
+      Hide container id output on container creation
+      Change dir of /external to /nulecule/external
+      Add binary generation
+      Update to a cleaner logging output.
+      Logging formatting
+
+Suraj Deshmukh :
+      Added elif to consecutive exclusive if statements
+      Use filter to search in kubeconfig.py
+      Now logs can show path to file under root atomicapp folder
+      Updated the inContainer function doc string
+      Removed unused function update from utils.py
+      Replaced a for loop that initialized a dict
+```
+
+## Atomic App 0.5.1 (04-26-2016)
+
+This is a minor release of Atomic App where we refactor, fix code bugs, as well as deprecate an old feature. Due to the numerous issues with locking, as well as the deprecation of the `lockfile` library, we have removed the locking mechanism from Atomic App.
+
+The main features of this release are:
+
+ - The deprecation of locking
+ - `atomicapp init` code and UI clean-up
+ - Documentation updates
+ - inContainer() function bug fix for Docker 1.10+
+
+```
+Charlie Drage :
+      Add Marathon to index
+      Update README.md with correct installation instructions.
+      Remove dockerenv and dockerinit check
+      Remove locking from Atomic App
+
+Dusty Mabe :
+      init: remove unnecessary message to the user
+      init: remove number of return variables from NuleculeManager.init()
+      init: add logic to properly cleanup tmp dir
+      init: break a few long lines into shorter ones
+
+Shubham Minglani :
+      fix typo
+
+Suraj Deshmukh :
+      Moved a constant from openshift.py to constants.py
+      Abstracted the way we get absolute path
+```
+
+## Atomic App 0.5.0 (04-12-2016)
+
+This is a major release of Atomic App where we introduce a new CLI command as well as the renaming of multiple provider configuration parameters.
+
+The main features of this release are:
+
+ - Introduction of the `atomicapp init` CLI command
+ - Renaming of provider configuration related parameters
+ - --provider-auth added as a CLI command
+
+Other:
+
+ - Updated legal information
+ - Bug fix on persistent storage initialization
+ - Utility method to gather sudo user path and information
+ - Improved detection of whether we're inside a Docker container
+ - Improved readability on provider failed exceptions
+ - docker inspect bugfix
+
+## Atomic App Initialization
+
+We've included support for initializing a basic Atomic App via the `atomicapp init` command. This creates a basic example that can be used on the __Docker__ and __Kubernetes__ providers, based on the [centos/httpd](https://hub.docker.com/r/centos/httpd/) docker image.
+ +```bash +▶ atomicapp init helloworld +[INFO] - main.py - Action/Mode Selected is: init + +Atomic App: helloworld initialized at ./helloworld + +▶ vim ./helloworld/Nulecule # Make changes to the Nulecule file + +▶ atomicapp run ./helloworld +[INFO] - main.py - Action/Mode Selected is: run +[INFO] - base.py - Provider not specified, using default provider - kubernetes +[WARNING] - plugin.py - Configuration option 'provider-config' not found +[WARNING] - plugin.py - Configuration option 'provider-config' not found +[INFO] - kubernetes.py - Using namespace default +[INFO] - kubernetes.py - trying kubectl at /usr/bin/kubectl +[INFO] - kubernetes.py - trying kubectl at /usr/local/bin/kubectl +[INFO] - kubernetes.py - found kubectl at /usr/local/bin/kubectl +[INFO] - kubernetes.py - Deploying to Kubernetes + +Your application resides in ./helloworld +Please use this directory for managing your application + +``` + +## New provider configuration parameter names + +We've renamed the provider-specific parameters for better clarity by adding dashes in-between 'provider' and the specified function. + +Major changes include the renaming of __accesstoken__ to __provider-auth__. + +``` +providerapi --> provider-api +accesstoken --> provider-auth +providertlsverify --> provider-tlsverify +providercafile --> provider-cafile +``` + +```ini +[general] +provider = openshift +namespace = mynamespace +provider-api = https://127.0.0.1:8443 +provider-auth = sadfasdfasfasfdasfasfasdfsafasfd +provider-tlsverify = True +provider-cafile = /etc/myca/ca.pem +``` + +```sh +atomicapp run projectatomic/etherpad-centos7-atomicapp --provider openshift --provider-tlsverify False --provider-auth foo --provider-api "https://localhost:8443" +``` + +``` +Charlie Drage : + Add more legal information + Update year + Requirements should retrieve from Nulecule object not graph + Warn not error on missing requirement + Util to gather what user is running Atomic App and which home dir it should use + Check to see if it's a Docker container + Update Dockerfile.pkgs testing repos + Dashes added to CLI commands to distinguish provider config data + Fix test params with the new dashes + Add provider-auth as a CLI command and convert ACCESSTOKEN to provider-auth + Modify accesstoken tests to provider-auth + Change constant ACCESS_TOKEN_KEY to PROVIDER_AUTH_KEY + Modify documentation to reflect changes in params + Remove /host from provider config example path + +Dusty Mabe : + init: modify docker template to publish to host port 80 + +Ratnadeep Debnath : + Added 'init' command to initialize a new atomic app. + Make destination optional atomicapp init command + Ask user if destination is not empty when initializting atomic app. + Do not acquire lock for initializing atomicapp. + Set default action as 'Y' when atomicapp init asks to clean dest directory. + Include nulecule template files in package data. + Add k8s service for initialized app. + Moved Nulecule template files to external dir. + Don't hard code atomicapp/nulecule versions in atomic app template. + Show status message on atomicapp init run. + Added doc strings for atomicapp init method. + +Shubham Minglani : + Handle ProviderFailedException, fix #627 + add pass for improved readability + replace inspect with ps, fix #672 + +Suraj Deshmukh : + Added OrderedDict so as to deploy application in given order +``` + +## Atomic App 0.4.5 (03-29-2016) + +This is a minor release of Atomic App where we make some changes to the UI output as well as fix a few provider-specific bugs. 
+
+The main features of this release are:
+
+ - Cleaner logging output
+ - More prominent output when Atomic App asks for a missing answers variable
+ - Multi-line artifact support for the Docker provider
+
+Other:
+
+ - Update documentation on cli comparison
+ - Move kubeconfig parsing to a separate class
+ - Refactor cli exceptions code
+
+```
+Charlie Drage :
+      Convert to epoch time
+      Update docs / more clarity on cli comparison
+      Change asking format
+      Move kubeconfig parsing to a separate file
+      Change kubeconfig testing name and move into a separate file
+      Add weekly meeting information
+
+Preeti Chandrashekar :
+      Minor edits to atomicapp_lifecycle.md
+
+Shubham Minglani :
+      handle docker error, raise instead of print, fix #570
+      refactor exceptions, fix #621, fix #622
+
+Suraj Deshmukh :
+      Docker-run file with multi-line command supported
+```
+
+## Atomic App 0.4.4 (03-15-2016)
+
+This release includes a major update to our documentation, as well as to the user experience when deploying an Atomic App.
+
+The main features are:
+
+ - Major update to documentation
+ - Cleaner logging output
+ - Error-out validation on JSON/YAML
+
+UI:
+
+ - Fix --version output on CLI
+ - Handle docker exception errors
+ - Inform on wrong provider name provided within answers.conf
+
+Other:
+
+ - Fix requirements on 'make test'
+
+For a full list of changes between 0.4.4 and 0.4.3 please see the git shortlog below!
+
+```
+Charlie Drage (10):
+      Doc for current functions implemented by spec
+      Fix minor markdown error in spec_coverage
+      Major update to README and documentation
+      Error cleanly on missing Nulecule or invalid formatted Nulecule
+      Multiple problems with issuestats.com
+      Add tests for missing Nulecule or wrongly formatted file for missing
+      Change from ReadWrite to ReadWriteOnce in persistent storage tests
+      Add Persistent Storage validation on ReadWriteOnce, etc.
+      Remove time from default output
+      Convert to epoch time
+
+Dusty Mabe (2):
+      docs: fix broken link
+      logging: fix duplicated log messages
+
+Preeti Chandrashekar (1):
+      Minor edits to atomicapp_lifecycle.md
+
+Shubham Minglani (2):
+      Handle docker pull exception, improve #441, fix #568
+      fix --version output, fix #481
+
+Suraj Deshmukh (5):
+      Install requirements before make test
+      Wrong provider name in answers.conf, exits AtomicApp with readable error
+      Typo in providers docs
+      Instruction to skip travis CI
+      Inform user when provider not specified
+```
+
+## Atomic App 0.4.3 (03-01-2016)
+
+You'll now see pretty colors with logging / output!
+
+With this release, we've refactored our logging formatter, making it easier to distinguish between information, debug, warning and error messages.
+
+You are now able to specify which logging format you'd like to output via the command line:
+
+```
+  --logtype {cockpit,color,nocolor,none}
+                        Override the default logging output. The options are:
+                        nocolor: we will only log to stdout; color: log to
+                        stdout with color; cockpit: used with cockpit
+                        integration; none: atomicapp will disable any logging.
+                        If nothing is set and logging to file then 'nocolor'
+                        by default. If nothing is set and logging to tty then
+                        'color' by default.
+```
+
+The main features are:
+
+ - A new logging mechanism that outputs color-coordinated logging messages
+ - Added CLI commands for color, nocolor, cockpit and 'none' output
+
+UI:
+
+ - Failure on finding no artifacts
+
+Other:
+
+ - Readme updates / typo fixes
+
+```
+Charlie Drage (3):
+      Fail if unable to find artifact
+      Change order of getting context
+      Update readme
+
+Dusty Mabe (4):
+      logging: Add in Atomic App Logging class
+      logging: add cockpit logging output
+      tests: fix test to look for output in stdout vs stderr
+
+Shubham Minglani (1):
+      Remove extra whitespaces from logging output.
+```
+
+## Atomic App 0.4.2 (02-18-2016)
+
+As we start to get closer to a 1.0.0 release, we continue to focus on tests and user interaction. This week's release focuses on both, as well as a minor feature.
+
+The main features of this release are:
+ - Metadata is now an optional requirement when building
+
+UI:
+ - Relative path support for provider config data
+ - Raise on missing artifact or docker image failure
+
+Bug fixes:
+ - Label fixes against `atomic` cli master branch. We now pass our current working directory as a variable
+
+Misc:
+ - Remove unneeded test suite files (Dockerfiles, licenses, etc.)
+ - All references to `install` have now been removed within the code in favour of `fetch`
+
+
+```
+Charlie Drage :
+      Fail on missing artifacts within Nulecule file
+      Add tests for failure of finding Nulecule artifacts
+      Remove instances of install verb to fetch
+      Remove unneeded files in test examples dirs
+      Fix xpathing tests on missing files
+
+Dusty Mabe :
+      providerconfig: support someone specifying a relative path
+      cli: Print helpful error if no app_spec provided.
+      Do not use artifacts dir to select provider.
+      tests: update cli test
+      labels: update run labels to no longer use backticks
+      labels: no longer default to verbose output
+      Add Tomas to MAINTAINERS
+      Adds Atomicapp lifecycle definition. Closes #290
+      nulecule: error if no artifacts in spec for inherited provider
+
+Ratnadeep Debnath :
+      Update file handling doc. Fixes #285
+
+Suraj Deshmukh :
+      This makes `metadata` an optional argument
+```
+
+## Atomic App 0.4.1 (02-02-2016)
+
+0.4.1 is a minor bug fix release.
+
+```
+Charlie Drage :
+      Remove roadmap in favour of wiki
+      Remove symbolic link from Dockerfile
+
+Dusty Mabe :
+      cli: Fix bug with atomic cli + genanswers
+      openshift: Fix a few spelling mistakes.
+      openshift: clean up scale function log message.
+      If not given, don't populate namespace in answers.conf.gen.
+
+Tomas Kral :
+      marathon: do not convert types when parsing json artifact
+```
+
+## Atomic App 0.4.0 (01-20-2016)
+
+With this release we bump our version to 0.4.0 to coincide with our BETA-4 release, as well as the change to our "install" verb.
+
+The most significant new features are:
+ - Renaming install to fetch
+ - Allowing users to pass an answers file as a URL
+
+For an extended list of changes, please see the git shortlog below.
+
+```
+Charlie Drage :
+      Change undeploy/deploy functions to run/stop
+      Rename install to fetch
+      Remove mention of uninstall function
+      Fix test names
+      Remove install label from Dockerfiles
+
+Dusty Mabe :
+      docker: fix stopping for artifacts with '--name='
+      cli: allow specifying target dir during atomic run
+      cli: add --namespace option to cli
+      Allow users to provide answers file as url.
+      Create destination app_path dir if it doesn't exist yet.
+
+Ratnadeep Debnath :
+      Support specifying default provider in Nulecule spec file. Fixes #378
+
+Tomas Kral :
+      openshift provider: safer stop
+      openshift provider: fix typos, add more explanation
+      openshift provider: remove acronyms from comments
+```
+
+## Atomic App 0.3.1 (01-14-2016)
+
+This release introduces some significant features to Atomic App, and is our first release since 0.3.0.
+
+The foremost features include:
+ - Persistent storage
+ - HTTPS (TLS) verification and support for OpenShift
+ - OpenShift stop support
+ - Nested Nulecule application support for OpenShift.
+
+For an extended list of changes, please see the git shortlog below.
+
+```
+Charlie Drage (9):
+      Merge pull request #457 from rtnpro/remove-docker-containers-on-stop
+      Merge pull request #392 from kadel/marathon-provider
+      0.3.0 Release
+      Add persistent storage core
+      Add Kubernetes persistent storage functionality
+      Test requirements.py persistent storage
+      Warn if no persistent volumes exist to claim
+      Merge pull request #485 from kadel/issue484
+      Stop Docker containers more gracefully
+
+Dharmit Shah (10):
+      Common place for list of Providers
+      PEP8
+      Adds Marathon provider data for `helloapache` example
+      Nulecule for `helloapache` app now contains information about marathon artifacts
+      CLI tests for marathon provider using `helloapache` atomic app
+      Information about where to specify `providerapi` for Marathon provider
+      Changes suggested in PR review
+      Added try..except block for request
+      Catch `AnyMarkupError` instead of `Exception` for invalid artifacts
+      Use `ProviderFailedException` instead of `sys.exit`
+
+Dusty Mabe (40):
+      Merge pull request #463 from kadel/make_rest_request
+      Revert "Remove container on stopping on Docker provider. Fixes #389"
+      Merge pull request #464 from projectatomic/revert-457-remove-docker-containers-on-stop
+      Allow user to specify both source and destination as directories.
+      Merge pull request #466 from dustymabe/dusty-src-dest
+      cli: import argparse rather than specific items
+      cli: Restructure argument parsers.
+      cli: Add global options help text to toplevel parser.
+      cli: Add in a --mode cli switch to select action.
+      Merge pull request #468 from dustymabe/dusty-add-mode
+      Fix yaml choice for --answers-format.
+      utils: add rm_dir() function.
+      Add --destination=none. Files don't persist after run.
+      Update native openshift code to use dest=none.
+      Add 'genanswers' action to generate answers.conf in cwd.
+      Merge pull request #469 from dustymabe/dusty-add-genanswers-new
+      cli: Fix the name of the genanswers subparser.
+      cli: Clarify some of the app_spec help texts.
+      Merge pull request #465 from projectatomic/openshift-unittests
+      Merge pull request #473 from kadel/openshift-AttributeError
+      Merge pull request #472 from dustymabe/dusty-update-stop-app-spec-help
+      Merge pull request #474 from kadel/openshift-stop
+      Merge pull request #460 from cdrage/persistent-storage
+      Merge pull request #488 from cdrage/stop-more-gracefully
+      cli: Add genanswers as a choice for --mode.
+      Include port information in detected openshift api endpoint.
+      Merge pull request #490 from dustymabe/allow-genanswers-for-mode
+      Merge pull request #491 from dustymabe/dusty-add-port-to-providerapi
+      Merge pull request #480 from kadel/openshift-ssl
+      Merge pull request #489 from projectatomic/oc-new-app-with-nested-nulecules
+      cli: allow overriding cmdline from env vars
+      Merge pull request #504 from dustymabe/dusty-cli-overrides
+      Add support for embedding answers file in application.
+      Merge pull request #505 from dustymabe/dusty-allow-embedded-answers-file
+      Add in cli options for some provider* answers.
+      Merge pull request #506 from dustymabe/dusty-add-cli-overrides
+      native openshift: move detection of provider information to provider.
+      native openshift: Add in ssl verification.
+      native openshift: respect it if user set tls_verify to False.
+      Merge pull request #503 from dustymabe/dusty-ssl-in-native-openshift
+
+Ratnadeep Debnath (13):
+      Remove container on stopping on Docker provider. Fixes #389
+      Refactored openshift provider for testing. #459
+      Refactor openshift provider: Move interaction with remote API from OpenShiftProvider
+      Added tests for OpenshiftProvider.deploy.
+      Refactor openshift _process_artifacts
+      Added tests for openshift _process_artifact_data.
+      Added tests for openshift to parse kube config
+      Added docs for openshift provider unittests.
+      Unpack image using Openshift API on Openshift provider.
+      Fixed unittests for Nulecule and NuleculeComponent
+      Fix using ssl connection options in websocket connection to Openshift.
+      Wait for Openshift pod to run, before extracting content.
+      Delete openshift pod irrespective of successful or failed extraction.
+
+Tomas Kral (24):
+      move openshift._make_request() to Utils.make_rest_request()
+      first draft of marathon provider
+      change providerurl to providerapi
+      fix dry-run for marathon
+      empty marathon_artifacts array in init()
+      marathon fixes
+      add Marathon to list of supported providers
+      raise exeption on AnyMarkupError in Marathon provider
+      mention Mesos with Marathon in docs
+      use Utils.make_rest_request in Marathon provider
+      add more docs to functions in Marathon provider
+      fix AttributeError OpenshiftClient.ssl_verify
+      Implement stop for OpenShift provider.
+      openshift provider: fix typos, add comments
+      openshift provider: when deleting use selector from RC to get PODs
+      openshift provider: update comments
+      openshift provider: add option for skiping tls verification
+      fix typos and flake8 errors
+      openshift provider: doc of providertlsverify and providercafile
+      openshift provider: break ssl_verify to provider_ca and provider_tls_verify
+      openshift provider: use _requests_tls_verify() in undeploy
+      openshift provider: check that required options are !None
+      openshift provider: test connection to OpenShift print nicer error message when invalid ttl/ssl certificate
+      openshift provider: translate CA path to host path and check if exists
+```
+
 ## Atomic App 0.3.0 (12-16-2015)
 
 This release introduces a new provider (Mesos) as well as a major refactor of the OpenShift provider.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b53c5a99..c9f5dec1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -7,6 +7,48 @@ which is hosted in the [Project Atomic Organization](https://github.com/projecta
 These are just guidelines, not rules, use your best judgment and feel free to
 propose changes to this document in a pull request.
 
+## Initial dev environment
+
+First of all, clone the GitHub repository: `git clone https://github.com/projectatomic/atomicapp`.
+
+### Installing Atomic App locally
+Simply run
+
+```
+make install
+```
+
+If you want to make changes to the code, we suggest:
+
+```
+cd atomicapp
+export PYTHONPATH=`pwd`:$PYTHONPATH
+alias atomicapp="python `pwd`/atomicapp/cli/main.py"
+```
+
+### Building for containerized execution
+```
+docker build -t [TAG] .
+```
+
+Use 'docker build' to package up the application and tag the resulting image.
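+
+Once built, the image can be exercised in place of a local install. This is a
+minimal sketch, assuming the image was tagged `atomicapp:dev` (the tag and the
+app used below are illustrative); the flags mirror the RUN label baked into the
+project's Dockerfiles:
+
+```
+docker run -it --rm --privileged --net=host \
+    -v `pwd`:/atomicapp -v /run:/run -v /:/host \
+    atomicapp:dev run projectatomic/helloapache
+```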
+
+### Fetch and run
+```
+atomicapp [--dry-run] [-v] [-a answers.conf] fetch|run|stop|genanswers [--provider docker] [--destination DST_PATH] APP|PATH
+```
+
+Pulls the application and its dependencies. If the last argument is an
+existing path, it looks for a `Nulecule` file there instead of pulling anything.
+
+* `--provider docker` Use the Docker provider within the Atomic App
+* `--destination DST_PATH` Unpack the application into the given directory instead of the current directory
+* `APP` Name of the image containing the application (ex. `projectatomic/apache-centos7-atomicapp`)
+* `PATH` Path to a directory with a fetched app (ex. the result of `atomicapp fetch ...`)
+
+Action `run` performs `fetch` prior to its own tasks if an `APP` is provided. Otherwise, it will use its respective `PATH`. When `run` is selected, the providers' code is invoked and containers are deployed.
+
+
 ## Submitting Issues
 
 * You can create an issue [here](https://github.com/projectatomic/atomicapp/issues/new), include as many details as possible with your report.
@@ -33,13 +75,11 @@ Before you submit your pull request consider the following guidelines:
 * Include documentation that either describe a change to a behavior of atomicapp or the changed capability to an end user of atomicapp.
 * Commit your changes using **a descriptive commit message**. If you are fixing an issue please include something like 'this closes issue #xyz'.
-* Additionally think about implementing a git hook, as flake8 is part of the [travis-ci tests](https://travis-ci.org/projectatomic/atomicapp) it will help you pass the CI tests.
+* Make sure your tests pass! As we use [travis-ci](https://travis-ci.org/projectatomic/atomicapp) with __flake8__, it's recommended to run both commands before submitting a PR.
 
   ```shell
-    $ cat .git/hooks/pre-push
-    #!/bin/bash
-
-    flake8 -v atomicapp
+    make syntax-check
+    make test
   ```
 
 * Push your branch to GitHub:
@@ -60,6 +100,8 @@ Before you submit your pull request consider the following guidelines:
 
 That's it! Thank you for your contribution!
 
+**NOTE**: When submitting a documentation PR, you can skip Travis CI by adding `[ci skip]` to your commit message.
+
 ### Merge Rules
 
 * Include unit or integration tests for the capability you have implemented
diff --git a/Dockerfile b/Dockerfile
deleted file mode 120000
index 9b8457c0..00000000
--- a/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-Dockerfiles.git/Dockerfile.centos
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..53c1d456
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,38 @@
+FROM centos:7
+
+MAINTAINER Red Hat, Inc.
+ +ENV ATOMICAPPVERSION="0.6.4" + +LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ + io.openshift.generate.job=true \ + io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + +WORKDIR /opt/atomicapp + +# Add the requirements file into the container +ADD requirements.txt ./ + +# Install needed requirements +RUN yum install -y epel-release && \ + yum install -y --setopt=tsflags=nodocs docker && \ + yum install -y --setopt=tsflags=nodocs $(sed s/^/python-/ requirements.txt) && \ + yum clean all + +WORKDIR /atomicapp + +# If a volume doesn't get mounted over /atomicapp (like when running in +# an openshift pod) then open up permissions so files can be copied into +# the directory by non-root. +RUN chmod 777 /atomicapp + +ENV PYTHONPATH /opt/atomicapp/ + +# the entrypoint +ENTRYPOINT ["/usr/bin/python", "/opt/atomicapp/atomicapp/cli/main.py"] + +# Add all of Atomic App's files to the container image +# NOTE: Do this last so rebuilding after development is fast +ADD atomicapp/ /opt/atomicapp/atomicapp/ diff --git a/Dockerfile.test b/Dockerfile.test index be7b97e1..81b76613 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -12,8 +12,6 @@ RUN yum install -y epel-release && \ yum install -y --setopt=tsflags=nodocs $(sed s/^/python-/ test-requirements.txt) && \ yum clean all -RUN mkdir /run/lock - ENV PYTHONPATH $PYTHONPATH:/opt/atomicapp/atomicapp CMD python -m pytest -vv tests --cov atomicapp diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 55ef5c6f..53c1d456 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,14 +2,13 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.3.0" +ENV ATOMICAPPVERSION="0.6.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp @@ -29,11 +28,6 @@ WORKDIR /atomicapp # the directory by non-root. 
RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. -RUN chmod 777 /run/lock - ENV PYTHONPATH /opt/atomicapp/ # the entrypoint diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index db9ef706..e30d1f5b 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,12 +2,11 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.3.0" +ENV ATOMICAPPVERSION="0.6.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp @@ -33,11 +32,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. -RUN chmod 777 /run/lock - ENV PYTHONPATH /opt/atomicapp/ # the entrypoint diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 7ed3d788..8b3defe3 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,14 +2,13 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.3.0" +ENV ATOMICAPPVERSION="0.6.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp @@ -28,11 +27,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. -RUN chmod 777 /run/lock - ENV PYTHONPATH /opt/atomicapp/ # the entrypoint diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 857ae1a4..e85038e5 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,15 +4,14 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.1.12" +ENV ATOMICAPPVERSION="0.6.4" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /atomicapp @@ -21,11 +20,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. 
-RUN chmod 777 /run/lock
-
 RUN yum install -y epel-release && \
     yum install -y atomicapp-${ATOMICAPPVERSION} ${TESTING} --setopt=tsflags=nodocs && \
     yum clean all
diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora
index c798c25f..44294b3d 100644
--- a/Dockerfiles.pkgs/Dockerfile.fedora
+++ b/Dockerfiles.pkgs/Dockerfile.fedora
@@ -4,15 +4,14 @@ MAINTAINER Red Hat, Inc.
 # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp
 # for the most recent builds of atomicapp in fedora
-ENV ATOMICAPPVERSION="0.1.12"
+ENV ATOMICAPPVERSION="0.6.4"
 ENV TESTING="--enablerepo=updates-testing"
 
 LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \
       io.openshift.generate.job=true \
       io.openshift.generate.token.as=env:TOKEN_ENV_VAR \
-      RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \
-      STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \
-      INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}"
+      RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \
+      STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}"
 
 WORKDIR /atomicapp
 
@@ -21,11 +20,6 @@ WORKDIR /atomicapp
 # the directory by non-root.
 RUN chmod 777 /atomicapp
 
-# If a volume doesn't get mounted over /run (like when running in an
-# openshift pod) then open up permissions so the lock file can be
-# created by non-root.
-RUN chmod 777 /run/lock
-
 RUN dnf install -y atomicapp-${ATOMICAPPVERSION} ${TESTING} --setopt=tsflags=nodocs && \
     dnf clean all
diff --git a/MAINTAINERS b/MAINTAINERS
index 53abf08d..ab7fab43 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2,4 +2,5 @@ Charlie Drage (@cdrage)
 Christoph Goern (@goern)
 Dusty Mabe (@dustymabe)
 Ratnadeep Debnath (@rtnpro)
+Tomas Kral (@kadel, @tkral)
 Vaclav Pavlin (@vpavlin)
diff --git a/Makefile b/Makefile
index 7f42b19e..bd4f223f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,17 +1,34 @@
+# This can be overridden (e.g.):
+# make install PYTHON=/usr/bin/python2.7
+PYTHON ?= /usr/bin/python
+DOCKER ?= /usr/bin/docker
+
+.PHONY: all
 all:
-	python -m pytest -vv
+	$(PYTHON) -m pytest -vv
 
+.PHONY: install
 install:
-	python setup.py install
+	$(PYTHON) setup.py install
 
+.PHONY: test
 test:
-	python -m pytest -vv
+	pip install -qr requirements.txt
+	pip install -qr test-requirements.txt
+	$(PYTHON) -m pytest tests/units/ -vv --cov atomicapp
 
+.PHONY: image
 image:
-	docker build -t $(tag) .
+	$(DOCKER) build -t $(tag) .
+.PHONY: syntax-check
 syntax-check:
 	flake8 atomicapp
 
+.PHONY: clean
 clean:
-	python setup.py clean --all
+	$(PYTHON) setup.py clean --all
+
+.PHONY: binary
+binary:
+	./script/binary.sh
diff --git a/README.md b/README.md
index 4c1109dc..90af9550 100644
--- a/README.md
+++ b/README.md
@@ -1,88 +1,109 @@
 # Atomic App
 
-Atomic App is a reference implementation of the [Nulecule Specification](http://www.projectatomic.io/docs/nulecule/). It can be used to bootstrap container applications and to install and run them. Atomic App is designed to be run in a container context. Examples using this tool may be found in the [Nulecule examples directory](https://github.com/projectatomic/nulecule/tree/master/examples).
+![](docs/images/logo.png "Project Atomic")
 
-## Getting Started
+Atomic App is a reference implementation of the [Nulecule](docs/spec/README.md) specification. Packaged Atomic App containers are "Nuleculized" and each component of the package is a "Nulecule".
 
-Atomic App is packaged as a container. End-users typically do not install the software from source. Instead use the atomicapp container as the `FROM` line in a Dockerfile and package your application on top. For example:
+Atomic App is used to bootstrap packaged container environments and run them on multiple container orchestrators. It is designed from the ground up to be portable and provider pluggable.
 
-```
-FROM projectatomic/atomicapp:0.3.0
-
-MAINTAINER Your Name
+ - __A "packaged installer" for all container-based environments and applications.__ Replace all those bash and Ansible scripts with one container-based deployment tool.
 
-ADD /Nulecule /Dockerfile README.md /application-entity/
-ADD /artifacts /application-entity/artifacts
-```
+ - __Target multiple providers:__ Specify the provider you want the Atomic App to run on. It supports Kubernetes, OpenShift, Mesos+Marathon and Docker.
 
-For more information see the [Atomic App getting started guide](http://www.projectatomic.io/docs/atomicapp/).
+ - __Inherit already packaged containers:__ Create composite applications by referencing other Nulecule-compliant applications. For example, plugging in an alternative well-orchestrated database in another referenced container image.
 
-## Developers
+ - __Fetch and run entire environments:__ Use `atomicapp fetch` and `atomicapp run` to run pre-packaged Nuleculized containers.
 
-First of all, clone the github repository: `git clone https://github.com/projectatomic/atomicapp`.
+## Installing Atomic App
+From Linux:
+```sh
+git clone https://github.com/projectatomic/atomicapp && cd atomicapp
+sudo make install
+```
 
-### Install this project
-Simply run
+_or_
 
-```
-pip install .
+Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases):
+```sh
+export RELEASE=0.6.4
+wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz
+tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE
+sudo make install
 ```
 
-If you want to do some changes to the code, I suggest to do:
+## Documentation
 
-```
-cd atomicapp
-export PYTHONPATH=`pwd`:$PYTHONPATH
-alias atomicapp="python `pwd`/atomicapp/cli/main.py"
-```
+This README contains some high-level overview information on Atomic App. The detailed documentation for Atomic App resides in the [docs](docs) directory. The index provided conveniently links to each section below:
 
-### Build
-```
-docker build -t [TAG] .
-```
+1. [Quick start](docs/quick_start.md)
+2. [Getting started](docs/start_guide.md)
+3. [Providers](docs/providers.md)
+    1. [Docker](docs/providers/docker/overview.md)
+    2. [Kubernetes](docs/providers/kubernetes/overview.md)
+    3. [OpenShift](docs/providers/openshift/overview.md)
+    4. [Marathon](docs/providers/marathon/overview.md)
+4. [CLI](docs/cli.md)
+5. [Nulecule file](docs/nulecule.md)
+6. [File handling](docs/file_handling.md)
+7. [Specification coverage](docs/spec_coverage.md)
+8. [Contributing](CONTRIBUTING.md)
+9. [Dependencies](docs/requirements.md)
+10. [Specification](docs/spec/README.md)
 
-Just a call to Docker to package up the application and tag the resulting image.
 
-### Install and Run
-```
-atomicapp [--dry-run] [-a answers.conf] install|run [--recursive] [--update] [--destination DST_PATH] APP|PATH
-```
+## Getting started
 
-Pulls the application and it's dependencies. If the last argument is
-existing path, it looks for `Nulecule` file there instead of pulling anything.
+Atomic App can be used either natively on your OS __or__ run via the [atomic](https://github.com/projectatomic/atomic) command on [Fedora or CentOS Atomic hosts](https://www.projectatomic.io/download/).
 
-* `--recursive yes|no` Pull whole dependency tree
-* `--update` Overwrite any existing files
-* `--destination DST_PATH` Unpack the application into given directory instead of current directory
-* `APP` Name of the image containing the application (f.e. `vpavlin/wp-app`)
-* `PATH` Path to a directory with installed (i.e. result of `atomicapp install ...`) app
+__Detailed instructions on [getting started](docs/start_guide.md) are available.__ Alternatively, use the [quick start guide](docs/quick_start.md) to get a Nuleculized application running immediately.
 
-Action `run` performs `install` prior its own tasks are executed if `APP` is given. When `run` is selected, providers' code is invoked and containers are deployed.
+An extended guide on the `Nulecule` file format is [also available](docs/nulecule.md).
 
-## Providers
+## Real-world examples
+Atomic App can be used to launch a cluster of containers (application servers, databases, etc.).
 
-Providers represent various deployment targets. They can be added by placing a file called `provider_name.py` in `providers/`. This file needs to implement the interface explained in (providers/README.md). For a detailed description of all providers available see the [Provider description](docs/providers.md).
+For a list of already packaged examples, visit the [nulecule-library](https://github.com/projectatomic/nulecule-library) repo.
 
-## Dependencies
+## Providers
 
-Please see [REQUIREMENTS](https://github.com/projectatomic/atomicapp/blob/master/docs/requirements.md) for current Atomic App dependencies.
+We currently support:
 
-##Communication channels
+ - Docker
+ - Kubernetes
+ - OpenShift 3
+ - Marathon (Mesos)
 
-* IRC: #nulecule (On Freenode)
-* Mailing List: [container-tools@redhat.com](https://www.redhat.com/mailman/listinfo/container-tools)
+Providers represent various deployment targets. They can be added by placing the artifact within the respective provider folder in `artifacts/`. For example, placing `deploy_pod.yml` within `artifacts/kubernetes/` (see the sketch below).
 
-# The Badges
+For a detailed description of all providers available see [docs/providers.md](docs/providers.md).
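+
+As an illustrative sketch, a Nulecule app targeting two providers might lay out
+its artifacts like this (file names other than `deploy_pod.yml` from the
+example above are hypothetical):
+
+```
+myapp/
+  Nulecule
+  artifacts/
+    docker/
+      myapp_run        # file containing a docker run command (hypothetical)
+    kubernetes/
+      deploy_pod.yml
+```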
+## Contributing to Atomic App
 [![Code Health](https://landscape.io/github/projectatomic/atomicapp/master/landscape.svg?style=flat)](https://landscape.io/github/projectatomic/atomicapp/master)
 [![Build Status](https://travis-ci.org/projectatomic/atomicapp.svg?branch=master)](https://travis-ci.org/projectatomic/atomicapp)
 [![Coverage Status](https://coveralls.io/repos/projectatomic/atomicapp/badge.svg?branch=master&service=github)](https://coveralls.io/github/projectatomic/atomicapp?branch=master)
-[![Issue Stats](http://issuestats.com/github/projectatomic/atomicapp/badge/pr)](http://issuestats.com/github/projectatomic/atomicapp)
-[![Issue Stats](http://issuestats.com/github/projectatomic/atomicapp/badge/issue)](http://issuestats.com/github/projectatomic/atomicapp)
 
-# Copyright
+First of all, awesome! We have [a development guide to help you get started!](CONTRIBUTING.md)
+
+If you have any issues or get stuck, feel free to open a GitHub issue or reach us at our communication channels (see below).
+
+## Dependencies
+
+See [REQUIREMENTS.md](docs/requirements.md) for a list of current Atomic App dependencies.
+
+## Specification
+
+Want to view the specification and contribute to changes? See the [Nulecule spec](docs/spec/README.md) for more information.
+
+## Communication channels
+
+* IRC: __#nulecule__ on irc.freenode.net
+* Mailing List: [container-tools@redhat.com](https://www.redhat.com/mailman/listinfo/container-tools)
+* Weekly IRC Nulecule meeting: Mondays @ 0930 EST / 0130 UTC
+* Weekly SCRUM Container-Tools meeting: Wednesdays @ 0830 EST / 1230 UTC on [Bluejeans](https://bluejeans.com/381583203/)
+
+## Copyright
 
-Copyright (C) 2015 Red Hat Inc.
+Copyright (C) 2016 Red Hat Inc.
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Lesser General Public License as published by
diff --git a/ROADMAP.md b/ROADMAP.md
deleted file mode 100644
index 33396192..00000000
--- a/ROADMAP.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Atomic App Roadmap
-
-This document provides a roadmap for current Atomic App development. The dates and features listed below are not considered final but rather an indication of what the core contributors are working on and the direction of Atomic App.
-
-Atomic App is the implementation of the [Nulecule spec](https://github.com/projectatomic/nulecule). We follow the spec closely, the current spec version as well as Atomic App version can be found via `atomicapp --version`.
-
-__Unless otherwise announced, the Atomic App CLI as well as Nulecule spec are subject to change. Backwards compatibility is a priority for version 1.0.0__
-
-We rank all ROADMAP objectives by order of priority. These are subject to frequent change.
- -#### High priority - - __Persistent storage__ - - Implement stop for OpenShift provider - - Support running Kubernetes from an Openshift template - -#### Medium priority - - Refactor logging - - AWS provider support - - Docker compose provider - -#### Low priority - - Nulecule index / library - - Keep versioning info in one location - - Ansible provider - - Nspawn provider - - Add a `USER` to Atomic App image - - https/ssh/sftp support for artifacts - - Use API instead of direct command-line for Docker && Kubernetes orchestration diff --git a/atomicapp.spec b/atomicapp.spec new file mode 100644 index 00000000..a55e04e4 --- /dev/null +++ b/atomicapp.spec @@ -0,0 +1,57 @@ +# -*- mode: python -*- + +# Function in order to recursively add data directories to pyinstaller +def extra_datas(mydir): + def rec_glob(p, files): + import os + import glob + for d in glob.glob(p): + if os.path.isfile(d): + files.append(d) + rec_glob("%s/*" % d, files) + files = [] + rec_glob("%s/*" % mydir, files) + extra_datas = [] + for f in files: + extra_datas.append((f, f, 'DATA')) + + return extra_datas + +block_cipher = None + +# Due to the way that we dynamically load providers via import_module +# in atomicapp/plugin.py we have to specify explicitly the modules directly +# so pyinstaller can "see" them. This is indicated by 'hiddenimports' +a = Analysis(['atomicapp/cli/main.py'], + pathex=['.'], + binaries=None, + datas=None, + hiddenimports=[ + 'atomicapp.providers.docker', + 'atomicapp.providers.kubernetes', + 'atomicapp.providers.openshift', + 'atomicapp.providers.marathon' + ], + hookspath=[], + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher) + +# Add external data (atomicapp init + provider external data) +a.datas += extra_datas('atomicapp/providers/external') +a.datas += extra_datas('atomicapp/nulecule/external') + +pyz = PYZ(a.pure, a.zipped_data, + cipher=block_cipher) +exe = EXE(pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + name='atomicapp/cli/main', + debug=False, + strip=False, + upx=True, + console=True ) diff --git a/atomicapp/__init__.py b/atomicapp/__init__.py index 24dd79f8..e69de29b 100644 --- a/atomicapp/__init__.py +++ b/atomicapp/__init__.py @@ -1,41 +0,0 @@ -""" - Copyright 2015 Red Hat, Inc. - - This file is part of Atomic App. - - Atomic App is free software: you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - Atomic App is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with Atomic App. If not, see . 
-""" - -import logging - - -def set_logging(name="atomicapp", level=logging.DEBUG): - # create logger - logger = logging.getLogger() - logger.handlers = [] - logger.setLevel(level) - - # create console handler - ch = logging.StreamHandler() - - # create formatter - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - - # add formatter to ch - ch.setFormatter(formatter) - - # add ch to logger - logger.addHandler(ch) - -set_logging(level=logging.DEBUG) # override this however you want diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py new file mode 100644 index 00000000..f5827dc0 --- /dev/null +++ b/atomicapp/applogging.py @@ -0,0 +1,199 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +import sys +import logging + +from atomicapp.constants import (LOGGER_COCKPIT, + LOGGER_DEFAULT) + + +class customOutputFormatter(logging.Formatter): + """ + A class that adds 'longerfilename' support to the logging formatter + This 'longerfilename' will be path/to/file.py after the root atomicapp + folder. + """ + + def __init__(self, *args): + super(customOutputFormatter, self).__init__(*args) + + # setting the root directory path of code base, currently this file is + # at /home/xyz/atomicapp/applogging.py so we need this path except for + # the 'applogging.py' so while splitting here, only last component + # i.e. 'applogging.py' is excluded by using -1, so if applogging.py + # is moved into another directory and if path becomes + # /home/xyz/atomicapp/logs/applogging.py to remove + # 'logs/applogging.py' use -2 + self.atomicapproot = '/'.join(__file__.split('/')[:-1]) + + def format(self, record): + """ + Add the 'longerfilename' field to the record dict. This is + then used by the Formatter in the logging library when + formatting the message string. + """ + record.longerfilename = record.pathname.split(self.atomicapproot)[-1].lstrip('/') + + # Call the parent class to do formatting. + return super(customOutputFormatter, self).format(record) + + +class colorizeOutputFormatter(customOutputFormatter): + """ + A class to colorize the log msgs based on log level + """ + + def format(self, record): + # Call the parent class to do formatting. + msg = super(colorizeOutputFormatter, self).format(record) + + # Now post process and colorize if needed + if record.levelno == logging.DEBUG: + msg = self._colorize(msg, 'cyan') + elif record.levelno == logging.WARNING: + msg = self._colorize(msg, 'yellow') + elif record.levelno == logging.INFO: + msg = self._colorize(msg, 'white') + elif record.levelno == logging.ERROR: + msg = self._colorize(msg, 'red') + else: + raise Exception("Invalid logging level {}".format(record.levelno)) + return self._make_unicode(msg) + + def _colorize(self, text, color): + """ + Colorize based upon the color codes indicated. 
+ """ + # Console color codes + colorCodes = { + 'white': '0', 'bright white': '1;37', + 'blue': '0;34', 'bright blue': '1;34', + 'green': '0;32', 'bright green': '1;32', + 'cyan': '0;36', 'bright cyan': '1;36', + 'red': '0;31', 'bright red': '1;31', + 'purple': '0;35', 'bright purple': '1;35', + 'yellow': '0;33', 'bright yellow': '1;33', + } + return "\033[" + colorCodes[color] + "m" + text + "\033[0m" + + def _make_unicode(self, input): + """ + Convert all input to utf-8 for multi language support + """ + if type(input) != unicode: + input = input.decode('utf-8') + return input + + +class Logging: + + @staticmethod + def setup_logging(verbose=None, quiet=None, logtype=None): + """ + This function sets up logging based on the logtype requested. + The 'none' level outputs no logs at all + The 'cockpit' level outputs just logs for the cockpit logger + The 'nocolor' level prints out normal log msgs (no cockpit) without color + The 'color' level prints out normal log msgs (no cockpit) with color + """ + + # Shorten the name of WARNING to WARN in order to decrease + # output width / character wrapping + logging.addLevelName(logging.WARNING, 'WARN') + + # If no logtype was set then let's have a sane default + # If connected to a tty, then default to color, else, no color + if not logtype: + if sys.stdout.isatty(): + logtype = 'color' + else: + logtype = 'nocolor' + + # Determine what logging level we should use + if verbose: + logging_level = logging.DEBUG + elif quiet: + logging_level = logging.WARNING + else: + logging_level = logging.INFO + + # Set the format string to use based on the logging level. + # For debug we include more of the filename than for !debug. + # We use -6s spacing to align all logging outputs + if logging_level == logging.DEBUG: + formatstr = '%(levelname)-6s :: - %(longerfilename)s :: %(message)s' + else: + formatstr = '%(levelname)-6s :: %(message)s' + + # Set a tuple of options that will be passed to the formatter. 
The %s + # will tell the logging library to use seconds since epoch for time stamps + formattup = (formatstr, "%s") + + # Get the loggers and clear out the handlers (allows this function + # to be ran more than once) + logger = logging.getLogger(LOGGER_DEFAULT) + logger.handlers = [] + cockpit_logger = logging.getLogger(LOGGER_COCKPIT) + cockpit_logger.handlers = [] + + if logtype == 'none': + # blank out both loggers + logger.addHandler(logging.NullHandler()) + cockpit_logger.addHandler(logging.NullHandler()) + return + + if logtype == 'cockpit': + # blank out normal log messages + logger.addHandler(logging.NullHandler()) + + # configure cockpit logger + handler = logging.StreamHandler(stream=sys.stdout) + formatter = logging.Formatter('atomicapp.status.%(levelname)s.message=%(message)s') + handler.setFormatter(formatter) + cockpit_logger.addHandler(handler) + cockpit_logger.setLevel(logging_level) + return + + if logtype == 'nocolor': + # blank out cockpit log messages + cockpit_logger.addHandler(logging.NullHandler()) + + # configure logger for basic no color printing to stdout + handler = logging.StreamHandler(stream=sys.stdout) + formatter = customOutputFormatter(*formattup) + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging_level) + return + + if logtype == 'color': + # blank out cockpit log messages + cockpit_logger.addHandler(logging.NullHandler()) + + # configure logger for color printing to stdout + handler = logging.StreamHandler(stream=sys.stdout) + formatter = colorizeOutputFormatter(*formattup) + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging_level) + return + + # If we made it here then there is an error + raise Exception("Invalid logging output type: {}".format(logtype)) diff --git a/atomicapp/cli/__init__.py b/atomicapp/cli/__init__.py index 9734a072..a38c54ce 100644 --- a/atomicapp/cli/__init__.py +++ b/atomicapp/cli/__init__.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 94119efb..46ac513a 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. 
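# Editor's note: a short usage sketch of the setup above. The logger names
# correspond to LOGGER_DEFAULT ("atomicapp") and LOGGER_COCKPIT ("cockpit")
# in atomicapp/constants.py; everything else here is illustrative.
import logging
from atomicapp.applogging import Logging

Logging.setup_logging(verbose=True, quiet=False, logtype='nocolor')
logging.getLogger("atomicapp").debug("formatted by customOutputFormatter")
logging.getLogger("cockpit").info("swallowed unless --logtype=cockpit is set")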
@@ -22,10 +22,8 @@ import argparse import logging -from lockfile import LockFile -from lockfile import AlreadyLocked -from atomicapp import set_logging +from atomicapp.applogging import Logging from atomicapp.constants import (__ATOMICAPPVERSION__, __NULECULESPECVERSION__, ANSWERS_FILE, @@ -33,13 +31,15 @@ APP_ENT_PATH, CACHE_DIR, HOST_DIR, - LOCK_FILE, + LOGGER_DEFAULT, PROVIDERS) from atomicapp.nulecule import NuleculeManager -from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.exceptions import NuleculeException, DockerException +from atomicapp.plugin import ProviderFailedException from atomicapp.utils import Utils +from atomicapp.index import Index -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) def print_app_location(app_path): @@ -50,76 +50,108 @@ def print_app_location(app_path): def cli_genanswers(args): - try: - argdict = args.__dict__ - nm = NuleculeManager(app_spec=argdict['app_spec'], - destination='none') - nm.genanswers(**argdict) - Utils.rm_dir(nm.app_path) # clean up files - sys.exit(0) - except NuleculeException as e: - logger.error(e) - sys.exit(1) - except Exception as e: - logger.error(e, exc_info=True) - sys.exit(1) + argdict = args.__dict__ + nm = NuleculeManager(app_spec=argdict['app_spec'], + destination='none') + nm.genanswers(**argdict) + Utils.rm_dir(nm.app_path) # clean up files + sys.exit(0) + + +def cli_fetch(args): + argdict = args.__dict__ + destination = argdict['destination'] + nm = NuleculeManager(app_spec=argdict['app_spec'], + destination=destination, + cli_answers=argdict['cli_answers'], + answers_file=argdict['answers'], + answers_format=argdict.get('answers_format')) + nm.fetch(**argdict) + # Clean up the files if the user asked us to. Otherwise + # notify the user where they can manage the application + if destination and destination.lower() == 'none': + Utils.rm_dir(nm.app_path) + else: + print_app_location(nm.app_path) + sys.exit(0) -def cli_install(args): +def cli_run(args): + argdict = args.__dict__ + destination = argdict['destination'] + nm = NuleculeManager(app_spec=argdict['app_spec'], + destination=destination, + cli_answers=argdict['cli_answers'], + answers_file=argdict['answers'], + answers_format=argdict.get('answers_format')) + nm.run(**argdict) + # Clean up the files if the user asked us to. Otherwise + # notify the user where they can manage the application + if destination and destination.lower() == 'none': + Utils.rm_dir(nm.app_path) + else: + print_app_location(nm.app_path) + sys.exit(0) + + +def cli_stop(args): + argdict = args.__dict__ + nm = NuleculeManager(app_spec=argdict['app_spec']) + nm.stop(**argdict) + sys.exit(0) + + +def cli_init(args): try: argdict = args.__dict__ - destination = argdict['destination'] - nm = NuleculeManager(app_spec=argdict['app_spec'], - destination=destination, - answers_file=argdict['answers']) - nm.install(**argdict) - # Clean up the files if the user asked us to. 
Otherwise - # notify the user where they can manage the application - if destination and destination.lower() == 'none': - Utils.rm_dir(nm.app_path) - else: - print_app_location(nm.app_path) + appdir = NuleculeManager.init(argdict['app_name'], + argdict['destination']) + if appdir: + print('\nAtomic App: %s initialized at %s' % + (argdict['app_name'], appdir)) sys.exit(0) - except NuleculeException as e: - logger.error(e) - sys.exit(1) except Exception as e: logger.error(e, exc_info=True) sys.exit(1) -def cli_run(args): - try: - argdict = args.__dict__ - destination = argdict['destination'] - nm = NuleculeManager(app_spec=argdict['app_spec'], - destination=destination, - answers_file=argdict['answers']) - nm.run(**argdict) - # Clean up the files if the user asked us to. Otherwise - # notify the user where they can manage the application - if destination and destination.lower() == 'none': - Utils.rm_dir(nm.app_path) +def cli_index(args): + argdict = args.__dict__ + i = Index() + if argdict["index_action"] == "list": + i.list() + elif argdict["index_action"] == "update": + i.update() + elif argdict["index_action"] == "generate": + i.generate(argdict["location"]) + sys.exit(0) + + +# Create a custom action parser. Need this because for some args we don't +# want to store a value if the user didn't provide one. "store_true" does +# not allow this; it will always create an attribute and store a value. +class TrueOrFalseAction(argparse.Action): + + def __call__(self, parser, namespace, values, option_string=None): + if values.lower() == 'true': + booleanvalue = True else: - print_app_location(nm.app_path) - sys.exit(0) - except NuleculeException as e: - logger.error(e) - sys.exit(1) - except Exception as e: - logger.error(e, exc_info=True) - sys.exit(1) + booleanvalue = False + setattr(namespace, self.dest, booleanvalue) -def cli_stop(args): +def cli_func_exec(cli_func, cli_func_args): try: - argdict = args.__dict__ - nm = NuleculeManager(app_spec=argdict['app_spec']) - nm.stop(**argdict) - sys.exit(0) + cli_func(cli_func_args) + except DockerException as e: + logger.error(e) + sys.exit(1) except NuleculeException as e: logger.error(e) sys.exit(1) + except ProviderFailedException as e: + logger.error(e) + sys.exit(1) except Exception as e: logger.error(e, exc_info=True) sys.exit(1) @@ -144,17 +176,23 @@ def create_parser(self): formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False, description=( - "This will install and run an Atomic App, " + "This will fetch and run an Atomic App, " "a containerized application conforming to the Nulecule Specification")) # Add a help function to the toplevel parser but don't output # help information for it. We need this because of the way we # are stitching help output together from multiple parsers toplevel_parser.add_argument( "-h", - "--help" - "--version", + "--help", action='help', help=argparse.SUPPRESS) + toplevel_parser.add_argument( + "-V", + "--version", + action='version', + version='atomicapp %s, Nulecule Specification %s' % ( + __ATOMICAPPVERSION__, __NULECULESPECVERSION__), + help=argparse.SUPPRESS) # Allow for subparsers of the toplevel_parser. Store the name # in the "action" attribute toplevel_subparsers = toplevel_parser.add_subparsers(dest="action") @@ -163,13 +201,13 @@ def create_parser(self): # Create the globals argument parser next. 
This will be a # parent parser for the subparsers globals_parser = argparse.ArgumentParser(add_help=False) + # Adding version argument again to avoid optional arguments from + # being listed twice in -h. This only serves the help message. globals_parser.add_argument( "-V", "--version", - action='version', - version='atomicapp %s, Nulecule Specification %s' % ( - __ATOMICAPPVERSION__, __NULECULESPECVERSION__), - help="show the version and exit.") + action="store_true", + help="Show the version and exit.") globals_parser.add_argument( "-v", "--verbose", @@ -184,11 +222,23 @@ def create_parser(self): default=False, action="store_true", help="Quiet output mode.") + globals_parser.add_argument( + "--logtype", + dest="logtype", + choices=['cockpit', 'color', 'nocolor', 'none'], + help=""" + Override the default logging output. The options are: + nocolor: we will only log to stdout; + color: log to stdout with color; + cockpit: used with cockpit integration; + none: atomicapp will disable any logging. + If nothing is set and logging to file then 'nocolor' by default. + If nothing is set and logging to tty then 'color' by default.""") globals_parser.add_argument( "--mode", dest="mode", default=None, - choices=['install', 'run', 'stop', 'genanswers'], + choices=['fetch', 'run', 'stop', 'genanswers'], help=(''' The mode Atomic App is run in. This option has the effect of switching the 'verb' that was passed by the @@ -196,24 +246,57 @@ def create_parser(self): in cases where a user is not using the Atomic App cli directly, but through another interface such as the Atomic CLI. EX: `atomic run --mode=genanswers`''')) - globals_parser.add_argument( + + # === DEPLOY PARSER === + # Create a 'deploy parser' that will include flags related to deploying + # and answers files + deploy_parser = argparse.ArgumentParser(add_help=False) + deploy_parser.add_argument( "--dry-run", dest="dryrun", default=False, action="store_true", help=( "Don't actually call provider. The commands that should be " - "run will be sent to stdout but not run.")) - globals_parser.add_argument( + "run will be logged but not run.")) + deploy_parser.add_argument( "--answers-format", dest="answers_format", default=ANSWERS_FILE_SAMPLE_FORMAT, choices=['ini', 'json', 'xml', 'yaml'], help="The format for the answers.conf.sample file. Default: %s" % ANSWERS_FILE_SAMPLE_FORMAT) + deploy_parser.add_argument( + "--namespace", + dest="namespace", + help=('The namespace to use in the target provider')) + deploy_parser.add_argument( + "--provider-tlsverify", + dest="provider-tlsverify", + action=TrueOrFalseAction, + choices=['True', 'False'], + help=(''' + Value for provider-tlsverify answers option. 
+ --providertlsverify=False to disable tls verification''')) + deploy_parser.add_argument( + "--provider-config", + dest="provider-config", + help='Value for provider-config answers option.') + deploy_parser.add_argument( + "--provider-cafile", + dest="provider-cafile", + help='Value for provider-cafile answers option.') + deploy_parser.add_argument( + "--provider-api", + dest="provider-api", + help='Value for provider-api answers option.') + deploy_parser.add_argument( + "--provider-auth", + dest="provider-auth", + help='Value for provider-auth answers option.') # === "run" SUBPARSER === run_subparser = toplevel_subparsers.add_parser( - "run", parents=[globals_parser]) + "run", parents=[globals_parser, deploy_parser]) run_subparser.add_argument( "-a", "--answers", @@ -225,16 +308,18 @@ def create_parser(self): help="A file which will contain anwsers provided in interactive mode") run_subparser.add_argument( "--provider", - dest="cli_provider", + dest="provider", choices=PROVIDERS, help="The provider to use. Overrides provider value in answerfile.") run_subparser.add_argument( "--ask", default=False, action="store_true", - help="Ask for params even if the defaul value is provided") + help="Ask for params even if the default value is provided") run_subparser.add_argument( "app_spec", + nargs='?', + default=None, help=( "Application to run. This is a container image or a path " "that contains the metadata describing the whole application.")) @@ -243,50 +328,52 @@ def create_parser(self): dest="destination", default=None, help=(''' - Destination directory for install. This defaults to a + Destination directory for fetching. This defaults to a directory under %s. Specify 'none' to not persist files and have them cleaned up when finished.''' % CACHE_DIR)) run_subparser.set_defaults(func=cli_run) - # === "install" SUBPARSER === - install_subparser = toplevel_subparsers.add_parser( - "install", parents=[globals_parser]) - install_subparser.add_argument( + # === "fetch" SUBPARSER === + fetch_subparser = toplevel_subparsers.add_parser( + "fetch", parents=[globals_parser, deploy_parser]) + fetch_subparser.add_argument( "-a", "--answers", dest="answers", help="Path to %s" % ANSWERS_FILE) - install_subparser.add_argument( + fetch_subparser.add_argument( "--no-deps", dest="nodeps", default=False, action="store_true", help="Skip pulling dependencies of the app") - install_subparser.add_argument( + fetch_subparser.add_argument( "-u", "--update", dest="update", default=False, action="store_true", help="Re-pull images and overwrite existing files") - install_subparser.add_argument( + fetch_subparser.add_argument( "--destination", dest="destination", default=None, help=(''' - Destination directory for install. This defaults to a + Destination directory for fetch. This defaults to a directory under %s. Specify 'none' to not persist files and have them cleaned up when finished.''' % CACHE_DIR)) - install_subparser.add_argument( + fetch_subparser.add_argument( "app_spec", + nargs='?', + default=None, help=( "Application to run. 
This is a container image or a path " "that contains the metadata describing the whole application.")) - install_subparser.set_defaults(func=cli_install) + fetch_subparser.set_defaults(func=cli_fetch) # === "stop" SUBPARSER === stop_subparser = toplevel_subparsers.add_parser( - "stop", parents=[globals_parser]) + "stop", parents=[globals_parser, deploy_parser]) stop_subparser.add_argument( "--provider", dest="cli_provider", @@ -295,7 +382,7 @@ def create_parser(self): stop_subparser.add_argument( "app_spec", help=(''' - Path to the directory where the Atomic App is installed + Path to the directory where the Atomic App is fetched that is to be stopped.''')) stop_subparser.set_defaults(func=cli_stop) @@ -304,9 +391,45 @@ def create_parser(self): "genanswers", parents=[globals_parser]) gena_subparser.add_argument( "app_spec", + nargs='?', + default=None, help='The name of a container image containing an Atomic App.') gena_subparser.set_defaults(func=cli_genanswers) + # === "index" SUBPARSER === + index_subparser = toplevel_subparsers.add_parser( + "index", parents=[globals_parser]) + index_action = index_subparser.add_subparsers(dest="index_action") + + index_list = index_action.add_parser("list") + index_list.set_defaults(func=cli_index) + + index_update = index_action.add_parser("update") + index_update.set_defaults(func=cli_index) + + index_generate = index_action.add_parser("generate") + index_generate.add_argument( + "location", + help=( + "Path containing Nulecule applications " + "which will be part of the generated index")) + index_generate.set_defaults(func=cli_index) + + # === "init" SUBPARSER === + init_subparser = toplevel_subparsers.add_parser( + "init", parents=[globals_parser]) + init_subparser.add_argument( + "app_name", + help="App name.") + init_subparser.add_argument( + "--destination", + dest="destination", + default=None, + help=(''' + Path to the directory where the Atomic App + is to be initialized.''')) + init_subparser.set_defaults(func=cli_init) + # Some final fixups.. We want the "help" from the global # parser to be output when someone runs 'atomicapp --help' # To get that functionality we will add the help from the @@ -314,19 +437,39 @@ def create_parser(self): # suppress the usage message from being output from the # globals parser. globals_parser.usage = argparse.SUPPRESS + deploy_parser.usage = argparse.SUPPRESS toplevel_parser.epilog = globals_parser.format_help() + toplevel_parser.epilog = deploy_parser.format_help() # Return the toplevel parser return toplevel_parser def run(self): cmdline = sys.argv[1:] # Grab args from cmdline + if len(cmdline) == 0: + cmdline = ['-h'] # Show help if no arguments are given + # Initial setup of logging (to allow for a few early debug statements) + Logging.setup_logging(verbose=True, quiet=False) # If we are running in an openshift pod (via `oc new-app`) then # there is no cmdline but we want to default to "atomicapp run". - # In this case copy files to cwd and use the working directory. 
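# Editor's note: a self-contained sketch of why the custom TrueOrFalseAction
# (defined above) is used for flags like --provider-tlsverify: unlike a plain
# store_true, nothing but None is recorded unless the user actually passes
# the flag, which lets the later cli_answers collection skip unset options.
import argparse

class TrueOrFalseAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values.lower() == 'true')

p = argparse.ArgumentParser()
p.add_argument("--provider-tlsverify", dest="tlsverify",
               action=TrueOrFalseAction, choices=['True', 'False'])
print(p.parse_args([]).tlsverify)                                 # None
print(p.parse_args(["--provider-tlsverify", "False"]).tlsverify)  # False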
if Utils.running_on_openshift(): - cmdline = 'run -v --dest=none /{}'.format(APP_ENT_PATH).split() + cmdline = 'run -v --dest=none --provider=openshift /{}' + cmdline = cmdline.format(APP_ENT_PATH).split() # now a list + + # If the user has elected to provide all arguments via the + # ATOMICAPP_ARGS environment variable then set it now + argstr = os.environ.get('ATOMICAPP_ARGS') + if argstr: + logger.debug("Setting cmdline args to: {}".format(argstr)) + cmdline = argstr.split() + + # If the user has elected to provide some arguments via the + # ATOMICAPP_APPEND_ARGS environment variable then add those now + argstr = os.environ.get('ATOMICAPP_APPEND_ARGS') + if argstr: + logger.debug("Appending args to cmdline: {}".format(argstr)) + cmdline.extend(argstr.split()) # We want to be able to place options anywhere on the command # line. We have added all global options to each subparser, @@ -337,26 +480,46 @@ def run(self): # NOTE: Also allow "mode" to override 'action' if specified args, _ = self.parser.parse_known_args(cmdline) cmdline.remove(args.action) # Remove 'action' from the cmdline - if args.mode: + if hasattr(args, 'mode') and args.mode: args.action = args.mode # Allow mode to override 'action' cmdline.insert(0, args.action) # Place 'action' at front - logger.info("Action/Mode Selected is: %s" % args.action) # Finally, parse args and give error if necessary args = self.parser.parse_args(cmdline) - # Set logging level - if args.verbose: - set_logging(level=logging.DEBUG) - elif args.quiet: - set_logging(level=logging.WARNING) - else: - set_logging(level=logging.INFO) + # Setup logging (now with arguments from cmdline) and log a few msgs + Logging.setup_logging(args.verbose, args.quiet, args.logtype) + + logger.info("Atomic App: %s - Mode: %s" + % (__ATOMICAPPVERSION__, + str(args.action).capitalize())) + + logger.debug("Final parsed cmdline: {}".format(' '.join(cmdline))) + + # In the case of Atomic CLI we want to allow the user to specify + # a directory if they want to for "run". For that reason we won't + # default the RUN label for Atomic App to provide an app_spec argument. + # In this case pick up app_spec from $IMAGE env var (set by RUN label). + if args.action != 'init' and args.action != 'index' and args.app_spec is None: + if os.environ.get('IMAGE') is not None: + logger.debug("Setting app_spec based on $IMAGE env var") + args.app_spec = os.environ['IMAGE'] + else: + print("Error. Too few arguments. Must provide app_spec.") + print("Run with '--help' for more info") + sys.exit(1) + + # Take the arguments that correspond to "answers" config file data + # and make a dictionary of it to pass along in args. 
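+ # (Editor's note: e.g. `--namespace=myproject --provider-api=https://127.0.0.1:8443`
+ # would yield cli_answers = {'namespace': 'myproject',
+ # 'provider-api': 'https://127.0.0.1:8443'}; the values are hypothetical.
+ # Config() later treats this dict as the highest-priority answers source.)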
+ setattr(args, 'cli_answers', {}) + for item in ['provider-api', 'provider-cafile', 'provider-auth', + 'provider-config', 'provider-tlsverify', 'namespace', + 'provider']: + if hasattr(args, item) and getattr(args, item) is not None: + args.cli_answers[item] = getattr(args, item) - lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE)) try: - lock.acquire(timeout=-1) - args.func(args) + cli_func_exec(args.func, args) except AttributeError: if hasattr(args, 'func'): raise @@ -364,8 +527,6 @@ def run(self): self.parser.print_help() except KeyboardInterrupt: pass - except AlreadyLocked: - logger.error("Could not proceed - there is probably another instance of Atomic App running on this machine.") except Exception as ex: if args.verbose: raise @@ -373,14 +534,12 @@ def run(self): logger.error("Exception caught: %s", repr(ex)) logger.error( "Run the command again with -v option to get more information.") - finally: - if lock.i_am_locking(): - lock.release() def main(): cli = CLI() cli.run() + if __name__ == '__main__': main() diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 2827d8d2..a441c235 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.3.0' +__ATOMICAPPVERSION__ = '0.6.4' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" @@ -40,6 +40,7 @@ DEFAULTNAME_KEY = "default" PROVIDER_KEY = "provider" NAMESPACE_KEY = "namespace" +NAMESPACE_SEPARATOR = ":" REQUIREMENTS_KEY = "requirements" # Nulecule spec terminology vs the function within /providers @@ -53,7 +54,10 @@ ANSWERS_FILE_SAMPLE = "answers.conf.sample" ANSWERS_FILE_SAMPLE_FORMAT = 'ini' WORKDIR = ".workdir" -LOCK_FILE = "/run/lock/atomicapp.lock" + +LOGGER_DEFAULT = "atomicapp" +LOGGER_COCKPIT = "cockpit" + HOST_DIR = "/host" DEFAULT_PROVIDER = "kubernetes" @@ -61,14 +65,29 @@ DEFAULT_NAMESPACE = "default" DEFAULT_ANSWERS = { "general": { - "provider": DEFAULT_PROVIDER, "namespace": DEFAULT_NAMESPACE } } PROVIDERS = ["docker", "kubernetes", "openshift", "marathon"] -PROVIDER_API_KEY = "providerapi" -ACCESS_TOKEN_KEY = "accesstoken" -PROVIDER_CONFIG_KEY = "providerconfig" -PROVIDER_TLS_VERIFY_KEY = "providertlsverify" -PROVIDER_CA_KEY = "providercafile" +PROVIDER_API_KEY = "provider-api" +PROVIDER_AUTH_KEY = "provider-auth" +PROVIDER_CONFIG_KEY = "provider-config" +PROVIDER_TLS_VERIFY_KEY = "provider-tlsverify" +PROVIDER_CA_KEY = "provider-cafile" + +K8S_DEFAULT_API = "http://localhost:8080" +OC_DEFAULT_API = "http://localhost:8443" + +# Persistent Storage Formats +PERSISTENT_STORAGE_FORMAT = ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"] + +# If running in an openshift POD via `oc new-app`, the ca file is here +OPENSHIFT_POD_CA_FILE = "/run/secrets/kubernetes.io/serviceaccount/ca.crt" + +# Index +INDEX_IMAGE = "projectatomic/nulecule-library" +INDEX_DEFAULT_IMAGE_LOCATION = "localhost" +INDEX_NAME = "index.yaml" +INDEX_LOCATION = ".atomicapp/" + INDEX_NAME +INDEX_GEN_DEFAULT_OUTPUT_LOC = "./" + INDEX_NAME diff --git a/atomicapp/index.py b/atomicapp/index.py new file mode 100644 index 00000000..e9799d78 --- /dev/null +++ b/atomicapp/index.py @@ -0,0 +1,203 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +from __future__ import print_function +import os + +import logging +import errno +from constants import (INDEX_IMAGE, + INDEX_LOCATION, + INDEX_DEFAULT_IMAGE_LOCATION, + INDEX_GEN_DEFAULT_OUTPUT_LOC, + INDEX_NAME) +from nulecule.container import DockerHandler +from nulecule.base import Nulecule +from atomicapp.nulecule.exceptions import NuleculeException + +from copy import deepcopy + +import anymarkup +from atomicapp.utils import Utils + +logger = logging.getLogger(__name__) + + +class IndexException(Exception): + pass + + +class Index(object): + + """ + This class represents the 'index' command for Atomic App. This lists + all available packaged applications to use. + """ + + index_template = {"location": ".", "nulecules": []} + + def __init__(self): + + self.index = deepcopy(self.index_template) + self.index_location = os.path.join(Utils.getUserHome(), INDEX_LOCATION) + self._load_index_file(self.index_location) + + def list(self): + """ + This command lists all available Nulecule packaged applications in a + properly formatted way. + """ + + # In order to "format" it correctly, find the largest length of 'name', 'id', and 'appversion' + # Set a minimum length of '7' due to the length of each column name + id_length = 7 + app_length = 7 + location_length = 7 + + # Loop through each 'nulecule' and retrieve the largest string length + for entry in self.index["nulecules"]: + id = entry.get('id') or "" + version = entry['metadata'].get('appversion') or "" + location = entry['metadata'].get('location') or INDEX_DEFAULT_IMAGE_LOCATION + + if len(id) > id_length: + id_length = len(id) + if len(version) > app_length: + app_length = len(version) + if len(location) > location_length: + location_length = len(location) + + # Print out the "index bar" with the lengths + index_format = ("{0:%s} {1:%s} {2:10} {3:%s}" % (id_length, app_length, location_length)) + print(index_format.format("ID", "VER", "PROVIDERS", "LOCATION")) + + # Loop through each entry of the index and spit out the formatted line + for entry in self.index["nulecules"]: + # Get the list of providers (first letter) + providers = "" + for provider in entry["providers"]: + providers = "%s,%s" % (providers, provider[0].capitalize()) + + # Remove the first element, add brackets + providers = "{%s}" % providers[1:] + + # Retrieve the entry information + id = entry.get('id') or "" + version = entry['metadata'].get('appversion') or "" + location = entry['metadata'].get('location') or INDEX_DEFAULT_IMAGE_LOCATION + + # Print out the row + print(index_format.format( + id, + version, + providers, + location)) + + def update(self, index_image=INDEX_IMAGE): + """ + Fetch the latest index image and update the file based upon + the INDEX_IMAGE attribute. By default, this should pull the + 'official' Nulecule index. 
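# Editor's note: a toy illustration of the column-width computation that
# Index.list() above performs before printing -- measure the widest value in
# each column (minimum width 7), then build the format string; rows are
# made up.
rows = [("mariadb-centos7-atomicapp", "1.0.0", "{D,K,O}", "localhost"),
        ("etherpad-app", "0.0.1", "{D,K}", "localhost")]
id_w = max([7] + [len(r[0]) for r in rows])
ver_w = max([7] + [len(r[1]) for r in rows])
loc_w = max([7] + [len(r[3]) for r in rows])
fmt = "{0:%s} {1:%s} {2:10} {3:%s}" % (id_w, ver_w, loc_w)
print(fmt.format("ID", "VER", "PROVIDERS", "LOCATION"))
for r in rows:
    print(fmt.format(*r))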
+ """ + + logger.info("Updating the index list") + logger.info("Pulling latest index image...") + self._fetch_index_container() + logger.info("Index updated") + + # TODO: Error out if the locaiton does not have a Nulecule file / dir + def generate(self, location, output_location=INDEX_GEN_DEFAULT_OUTPUT_LOC): + """ + Generate an index.yaml with a provided directory location + """ + logger.info("Generating index.yaml from %s" % location) + self.index = deepcopy(self.index_template) + + if not os.path.isdir(location): + raise Exception("Location must be a directory") + + for f in os.listdir(location): + nulecule_dir = os.path.join(location, f) + if f.startswith("."): + continue + if os.path.isdir(nulecule_dir): + try: + index_info = self._nulecule_get_info(nulecule_dir) + except NuleculeException as e: + logger.warning("SKIPPING %s. %s" % + (nulecule_dir, e)) + continue + index_info["path"] = f + self.index["nulecules"].append(index_info) + + if len(index_info) > 0: + anymarkup.serialize_file(self.index, output_location, format="yaml") + logger.info("index.yaml generated") + + def _fetch_index_container(self, index_image=INDEX_IMAGE): + """ + Fetch the index container + """ + # Create the ".atomicapp" dir if it does not exist + if not os.path.exists(os.path.dirname(self.index_location)): + try: + os.makedirs(os.path.dirname(self.index_location)) + except OSError as exc: # Guard against race condition + if exc.errno != errno.EEXIST: + raise + + dh = DockerHandler() + dh.pull(index_image) + dh.extract_files(index_image, "/" + INDEX_NAME, self.index_location) + + def _load_index_file(self, index_file=INDEX_LOCATION): + """ + Load the index file. If it does not exist, fetch it. + """ + # If the file/path does not exist, retrieve the index yaml + if not os.path.exists(index_file): + logger.warning("Couldn't load index file: %s", index_file) + logger.info("Retrieving index...") + self._fetch_index_container() + self.index = anymarkup.parse_file(index_file) + + def _nulecule_get_info(self, nulecule_dir): + """ + Get the required information in order to generate an index.yaml + """ + index_info = {} + nulecule = Nulecule.load_from_path( + nulecule_dir, nodeps=True) + index_info["id"] = nulecule.id + index_info["metadata"] = nulecule.metadata + index_info["specversion"] = nulecule.specversion + + if len(nulecule.components) == 0: + raise IndexException("Unable to load any Nulecule components from folder %s" % nulecule_dir) + + providers_set = set() + for component in nulecule.components: + if component.artifacts: + if len(providers_set) == 0: + providers_set = set(component.artifacts.keys()) + else: + providers_set = providers_set.intersection(set(component.artifacts.keys())) + + index_info["providers"] = list(providers_set) + return index_info diff --git a/atomicapp/nulecule/__init__.py b/atomicapp/nulecule/__init__.py index 7cf82d51..9da0a487 100644 --- a/atomicapp/nulecule/__init__.py +++ b/atomicapp/nulecule/__init__.py @@ -1,3 +1,21 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" from __future__ import absolute_import from .main import NuleculeManager diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 59fcdfb1..d0d76347 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -1,8 +1,27 @@ # -*- coding: utf-8 -*- +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" import anymarkup -import copy import logging import os +import yaml +import re from collections import defaultdict from string import Template @@ -10,23 +29,27 @@ from atomicapp.constants import (APP_ENT_PATH, EXTERNAL_APP_DIR, GLOBAL_CONF, + LOGGER_COCKPIT, + LOGGER_DEFAULT, MAIN_FILE, RESOURCE_KEY, PARAMS_KEY, NAME_KEY, INHERIT_KEY, ARTIFACTS_KEY, - REQUIREMENTS_KEY) + NAMESPACE_SEPARATOR) from atomicapp.utils import Utils from atomicapp.requirements import Requirements from atomicapp.nulecule.lib import NuleculeBase from atomicapp.nulecule.container import DockerHandler from atomicapp.nulecule.exceptions import NuleculeException -from atomicapp.providers.openshift import OpenShiftProvider +from atomicapp.providers.openshift import OpenshiftProvider from jsonpointer import resolve_pointer, set_pointer, JsonPointerException +from anymarkup import AnyMarkupError -logger = logging.getLogger(__name__) +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) +logger = logging.getLogger(LOGGER_DEFAULT) class Nulecule(NuleculeBase): @@ -38,7 +61,7 @@ class Nulecule(NuleculeBase): componenents, but does not have access to its parent's scope. 
""" - def __init__(self, id, specversion, metadata, graph, basepath, + def __init__(self, id, specversion, graph, basepath, metadata=None, requirements=None, params=None, config=None, namespace=GLOBAL_CONF): """ @@ -47,12 +70,12 @@ def __init__(self, id, specversion, metadata, graph, basepath, Args: id (str): Nulecule application ID specversion (str): Nulecule spec version - metadata (dict): Nulecule metadata graph (list): Nulecule graph of components basepath (str): Basepath for Nulecule application + metadata (dict): Nulecule metadata requirements (dict): Requirements for the Nulecule application params (list): List of params for the Nulecule application - config (dict): Config data for the Nulecule application + config (atomicapp.nulecule.config.Config): Config data namespace (str): Namespace of the current Nulecule application Returns: @@ -61,10 +84,10 @@ def __init__(self, id, specversion, metadata, graph, basepath, super(Nulecule, self).__init__(basepath, params, namespace) self.id = id self.specversion = specversion - self.metadata = metadata + self.metadata = metadata or {} self.graph = graph self.requirements = requirements - self.config = config or {} + self.config = config @classmethod def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, @@ -77,7 +100,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, image (str): A Docker image name. dest (str): Destination path where Nulecule data from Docker image should be extracted. - config (dict): Dictionary, config data for Nulecule application. + config: An instance of atomicapp.nulecule.config.Config namespace (str): Namespace for Nulecule application. nodeps (bool): Don't pull external Nulecule dependencies when True. @@ -87,18 +110,20 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, Returns: A Nulecule instance, or None in case of dry run. """ - logger.info('Unpacking image: %s to %s' % (image, dest)) + logger.info('Unpacking image %s to %s' % (image, dest)) if Utils.running_on_openshift(): # pass general config data containing provider specific data # to Openshift provider - op = OpenShiftProvider(config.get('general', {}), './', False) + op = OpenshiftProvider(config.globals, './', False) op.artifacts = [] op.init() op.extract(image, APP_ENT_PATH, dest, update) else: docker_handler = DockerHandler(dryrun=dryrun) docker_handler.pull(image) - docker_handler.extract(image, APP_ENT_PATH, dest, update) + docker_handler.extract_nulecule_data(image, APP_ENT_PATH, dest, update) + cockpit_logger.info("All dependencies installed successfully.") + return cls.load_from_path( dest, config=config, namespace=namespace, nodeps=nodeps, dryrun=dryrun, update=update) @@ -112,21 +137,45 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, Args: src (str): Path to load Nulecule application from. - config (dict): Config data for Nulecule application. + config (atomicapp.nulecule.config.Config): Config data for + Nulecule application. namespace (str): Namespace for Nulecule application. nodeps (bool): Do not pull external applications if True. dryrun (bool): Do not make any change to underlying host. update (bool): Update existing application if True, else reuse it. Returns: - A Nulecule instance or None in case of some dry run (installing - from image). + A Nulecule instance or None in case of some dry run (fetching + an image). 
""" nulecule_path = os.path.join(src, MAIN_FILE) + + if os.path.exists(nulecule_path): + with open(nulecule_path, 'r') as f: + nulecule_data = f.read() + else: + raise NuleculeException("No Nulecule file exists in directory: %s" % src) + if dryrun and not os.path.exists(nulecule_path): - raise NuleculeException("Installed Nulecule components are required to initiate dry-run. " + raise NuleculeException("Fetched Nulecule components are required to initiate dry-run. " "Please specify your app via atomicapp --dry-run /path/to/your-app") - nulecule_data = anymarkup.parse_file(nulecule_path) + + # By default, AnyMarkup converts all formats to YAML when parsing. + # Thus the rescue works either on JSON or YAML. + try: + nulecule_data = anymarkup.parse(nulecule_data) + except (yaml.parser.ParserError, AnyMarkupError), e: + line = re.search('line (\d+)', str(e)).group(1) + column = re.search('column (\d+)', str(e)).group(1) + + output = "" + for i, l in enumerate(nulecule_data.splitlines()): + if (i == int(line) - 1) or (i == int(line)) or (i == int(line) + 1): + output += "%s %s\n" % (str(i), str(l)) + + raise NuleculeException("Failure parsing %s file. Validation error on line %s, column %s:\n%s" + % (nulecule_path, line, column, output)) + nulecule = Nulecule(config=config, basepath=src, namespace=namespace, **nulecule_data) nulecule.load_components(nodeps, dryrun) @@ -146,18 +195,16 @@ def run(self, provider_key=None, dryrun=False): """ provider_key, provider = self.get_provider(provider_key, dryrun) - # Process preliminary requirements - # Pass configuration, path of the app, graph, provider as well as dry-run - # for provider init() - if REQUIREMENTS_KEY in self.graph[0]: - logger.debug("Requirements key detected. Running action.") - r = Requirements(self.config, self.basepath, self.graph[0][REQUIREMENTS_KEY], - provider_key, dryrun) - r.run() + # Process preliminary requirements before componenets + if self.requirements: + logger.debug("Requirements detected. Running action.") + Requirements(self.config, self.basepath, self.requirements, + provider_key, dryrun).run() # Process components for component in self.components: component.run(provider_key, dryrun) + cockpit_logger.info("Component %s installed successfully" % provider_key) def stop(self, provider_key=None, dryrun=False): """ @@ -176,11 +223,6 @@ def stop(self, provider_key=None, dryrun=False): for component in self.components: component.stop(provider_key, dryrun) - # TODO: NOT YET IMPLEMENTED - def uninstall(self): - for component in self.components: - component.uninstall() - def load_config(self, config=None, ask=False, skip_asking=False): """ Load config data for the entire Nulecule application, by traversing @@ -189,21 +231,23 @@ def load_config(self, config=None, ask=False, skip_asking=False): It updates self.config. Args: - config (dict): Existing config data, may be from ANSWERS - file or any other source. + config (atomicapp.nulecule.config.Config): Existing config data, + may be from ANSWERS file or any other source. Returns: None """ + if config is None: + config = self.config super(Nulecule, self).load_config( config=config, ask=ask, skip_asking=skip_asking) + for component in self.components: # FIXME: Find a better way to expose config data to components. # A component should not get access to all the variables, # but only to variables it needs. 
- component.load_config(config=copy.deepcopy(self.config), + component.load_config(config=config, ask=ask, skip_asking=skip_asking) - self.merge_config(self.config, component.config) def load_components(self, nodeps=False, dryrun=False): """ @@ -224,8 +268,8 @@ def load_components(self, nodeps=False, dryrun=False): node_name = node[NAME_KEY] source = Utils.getSourceImage(node) component = NuleculeComponent( - node_name, self.basepath, source, - node.get(PARAMS_KEY), node.get(ARTIFACTS_KEY), + self._get_component_namespace(node_name), self.basepath, + source, node.get(PARAMS_KEY), node.get(ARTIFACTS_KEY), self.config) component.load(nodeps, dryrun) components.append(component) @@ -248,6 +292,24 @@ def render(self, provider_key=None, dryrun=False): for component in self.components: component.render(provider_key=provider_key, dryrun=dryrun) + def _get_component_namespace(self, component_name): + """ + Get a unique namespace for a Nulecule graph item, by concatinating + the namespace of the current Nulecule (which could be the root Nulecule + app or a child or external Nulecule app) and name of the Nulecule + graph item. + + Args: + component_name (str): Name of the Nulecule graph item + + Returns: + A string + """ + current_namespace = '' if self.namespace == GLOBAL_CONF else self.namespace + return ( + '%s%s%s' % (current_namespace, NAMESPACE_SEPARATOR, component_name) + if current_namespace else component_name) + class NuleculeComponent(NuleculeBase): @@ -273,6 +335,7 @@ def load(self, nodeps=False, dryrun=False): """ Load external application of the Nulecule component. """ + cockpit_logger.info("Loading app %s ." % self.name) if self.source: if nodeps: logger.info( @@ -284,13 +347,14 @@ def run(self, provider_key, dryrun=False): """ Run the Nulecule component with the specified provider, """ + cockpit_logger.info("Deploying component %s ..." % self.name) if self._app: self._app.run(provider_key, dryrun) return provider_key, provider = self.get_provider(provider_key, dryrun) provider.artifacts = self.rendered_artifacts.get(provider_key, []) provider.init() - provider.deploy() + provider.run() def stop(self, provider_key=None, dryrun=False): """ @@ -302,18 +366,19 @@ def stop(self, provider_key=None, dryrun=False): provider_key, provider = self.get_provider(provider_key, dryrun) provider.artifacts = self.rendered_artifacts.get(provider_key, []) provider.init() - provider.undeploy() + provider.stop() def load_config(self, config=None, ask=False, skip_asking=False): """ Load config for the Nulecule component. """ + if config is None: + config = self.config super(NuleculeComponent, self).load_config( config, ask=ask, skip_asking=skip_asking) if isinstance(self._app, Nulecule): - self._app.load_config(config=copy.deepcopy(self.config), + self._app.load_config(config=self.config, ask=ask, skip_asking=skip_asking) - self.merge_config(self.config, self._app.config) def load_external_application(self, dryrun=False, update=False): """ @@ -333,12 +398,13 @@ def load_external_application(self, dryrun=False, update=False): self.basepath, EXTERNAL_APP_DIR, self.name) if os.path.isdir(external_app_path) and not update: logger.info( - 'Found existing external application for %s. ' - 'Loading it.' 
% self.name) + 'Found existing external application: %s. ' + 'Loading it.' % self.name) nulecule = Nulecule.load_from_path( - external_app_path, dryrun=dryrun, update=update) + external_app_path, dryrun=dryrun, update=update, + namespace=self.namespace) elif not dryrun: - logger.info('Pulling external application for %s.' % self.name) + logger.info('Pulling external application: %s' % self.name) nulecule = Nulecule.unpack( self.source, external_app_path, @@ -347,7 +413,13 @@ def load_external_application(self, dryrun=False, update=False): dryrun=dryrun, update=update ) + + # When pulling an external application, make sure the extracted + # "external" folder is owned by the user performing the extraction + # by passing along the basepath of the extraction + Utils.setFileOwnerGroup(self.basepath) self._app = nulecule + cockpit_logger.info("Copied app successfully.") @property def components(self): @@ -374,11 +446,15 @@ def render(self, provider_key=None, dryrun=False): if self._app: self._app.render(provider_key=provider_key, dryrun=dryrun) return - context = self.get_context() + + if self.artifacts is None: + raise NuleculeException( + "No artifacts specified in the Nulecule file") if provider_key and provider_key not in self.artifacts: raise NuleculeException( "Data for provider \"%s\" are not part of this app" % provider_key) + context = self.config.context(self.namespace) for provider in self.artifacts: if provider_key and provider != provider_key: continue @@ -399,6 +475,13 @@ def get_artifact_paths_for_provider(self, provider_key): """ artifact_paths = [] artifacts = self.artifacts.get(provider_key) + + # If there are no artifacts for the requested provider then error out. + # This can happen for an incorrectly named inherited provider (#435) + if artifacts is None: + raise NuleculeException( + "No artifacts for provider {}".format(provider_key)) + for artifact in artifacts: # Convert dict if the Nulecule file references "resource" if isinstance(artifact, dict) and artifact.get(RESOURCE_KEY): @@ -532,6 +615,8 @@ def _get_artifact_paths_for_path(self, path): immediate children, i.e., we do not deal with nested artifact directories at this moment. + If a file or directory is not found, raise an exception. + Args: path (str): Local path @@ -542,9 +627,14 @@ if os.path.isfile(path): artifact_paths.append(path) elif os.path.isdir(path): + if os.listdir(path) == []: + raise NuleculeException("Artifact directory %s is empty" % path) for dir_child in os.listdir(path): dir_child_path = os.path.join(path, dir_child) if dir_child.startswith('.') or os.path.isdir(dir_child_path): continue artifact_paths.append(dir_child_path) + else: + raise NuleculeException("Unable to find artifact %s" % path) + return artifact_paths diff --git a/atomicapp/nulecule/config.py b/atomicapp/nulecule/config.py new file mode 100644 index 00000000..b1d92b31 --- /dev/null +++ b/atomicapp/nulecule/config.py @@ -0,0 +1,173 @@ +import copy +import logging + +from atomicapp.constants import (GLOBAL_CONF, + LOGGER_COCKPIT, + DEFAULT_PROVIDER, + DEFAULT_ANSWERS) +from collections import defaultdict + +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) + + +class Config(object): + """ + This class stores config data in different scopes along with + source info for the data. When fetching the value for a key in a scope, + the source info and the PRIORITY order of the sources are taken into account.
+ + Data sources: + cli: Config data coming from the CLI + runtime: Config data resolved during atomic app runtime. For example, + when the value for a parameter in a Nulecule or Nulecule graph + item is missing in answers data, we first try to load the default + value for the parameter. When there's no default value, or when + the user has specified to forcefully ask the user for values, we + ask the user for data. These data collected/resolved during runtime + form the runtime data. + answers: Config data coming from answers file + defaults: Default config data specified in atomicapp/constants.py + + The priority order of the data sources is: + cli > runtime > answers > defaults + """ + + PRIORITY = ( + 'cli', + 'runtime', + 'answers', + 'defaults' + ) + + def __init__(self, answers=None, cli=None): + """ + Initialize a Config instance. + + Args: + answers (dict): Answers data + cli (dict): CLI data + """ + answers = answers or {} + cli = cli or {} + # We use a defaultdict of defaultdicts so that we can avoid doing + # redundant checks in a nested dictionary if the value of the keys + # are dictionaries or None. + self._data = defaultdict(defaultdict) + # Initialize default data dict + self._data['defaults'] = defaultdict(defaultdict) + # Initialize answers data dict + self._data['answers'] = defaultdict(defaultdict) + # Initialize cli data dict + self._data['cli'] = defaultdict(defaultdict) + # Initialize runtime data dict + self._data['runtime'] = defaultdict(defaultdict) + + # Load default answers + for scope, data in DEFAULT_ANSWERS.items(): + for key, value in data.items(): + self.set(key, value, scope=scope, source='defaults') + self.set('provider', DEFAULT_PROVIDER, scope=GLOBAL_CONF, source='defaults') + + # Load answers data + for scope, data in answers.items(): + for key, value in data.items(): + self.set(key, value, scope=scope, source='answers') + + # Load cli data + for key, value in cli.items(): + self.set(key, value, scope=GLOBAL_CONF, source='cli') + + def get(self, key, scope=GLOBAL_CONF, ignore_sources=[]): + """ + Get the value of a key in a scope. This takes care of resolving + the value by going through the PRIORITY order of the various + sources of data. + + Args: + key (str): Key + scope (str): Scope from which to fetch the value for the key + + Returns: + Value for the key. + """ + for source in self.PRIORITY: + if source in ignore_sources: + continue + value = self._data[source][scope].get(key) or self._data[source][ + GLOBAL_CONF].get(key) + if value: + return value + return None + + def set(self, key, value, source, scope=GLOBAL_CONF): + """ + Set the value for a key within a scope along with specifying the + source of the value. + + Args: + key (str): Key + value: Value + scope (str): Scope in which to store the value + source (str): Source of the value + """ + self._data[source][scope][key] = value + + def context(self, scope=GLOBAL_CONF): + """ + Get context data for the scope of Nulecule graph item by aggregating + the data from various sources taking their priority order into + account. This context data, which is a flat dictionary, is used to + render the variables in the artifacts of Nulecule graph item. + + Args: + scope (str): Scope (or namespace) for the Nulecule graph item. 
+ Returns: + A dictionary + """ + result = {} + for source in reversed(self.PRIORITY): + source_data = self._data[source] + result.update(copy.deepcopy(source_data.get(GLOBAL_CONF) or {})) + if scope != GLOBAL_CONF: + result.update(copy.deepcopy(source_data.get(scope) or {})) + return result + + def runtime_answers(self): + """ + Get runtime answers. + + Returns: + A defaultdict containing runtime answers data. + """ + answers = defaultdict(dict) + + for source in reversed(self.PRIORITY): + for scope, data in (self._data.get(source) or {}).items(): + answers[scope].update(copy.deepcopy(data)) + + # Remove empty sections from answers + for key, value in answers.items(): + if not value: + answers.pop(key) + + return answers + + def update_source(self, source, data): + """ + Update answers data for a source. + + Args: + source (str): Source name + data (dict): Answers data + """ + data = data or {} + if source not in self._data: + raise ValueError("Unknown config data source: %s" % source) + + # Clean up existing data for this source (clearing in one call + # avoids mutating the dict while iterating over it) + self._data[source].clear() + + for scope, data in data.items(): + for key, value in data.items(): + self.set(key, value, scope=scope, source=source) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 77b24e94..edf0dae5 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -1,17 +1,38 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see .
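# Editor's note: a small illustration of the source-priority resolution that
# the Config class above implements (cli > runtime > answers > defaults).
# It assumes GLOBAL_CONF == "general" as in atomicapp/constants.py; the key
# values are hypothetical.
from atomicapp.nulecule.config import Config

c = Config(answers={'general': {'provider': 'kubernetes'}},
           cli={'provider': 'docker'})
print(c.get('provider'))  # 'docker' -- the cli source outranks answers
c.set('namespace', 'mydb', source='runtime', scope='mariadb')
print(c.context('mariadb')['namespace'])  # 'mydb'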
+""" import os import subprocess import uuid import logging -from atomicapp.constants import (APP_ENT_PATH, +from atomicapp.constants import (LOGGER_COCKPIT, + LOGGER_DEFAULT, MAIN_FILE) from atomicapp.utils import Utils -from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.exceptions import NuleculeException, DockerException -logger = logging.getLogger(__name__) +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) +logger = logging.getLogger(LOGGER_DEFAULT) class DockerHandler(object): + """Interface to interact with Docker.""" def __init__(self, dryrun=False, docker_cli='/usr/bin/docker'): @@ -27,10 +48,18 @@ def __init__(self, dryrun=False, docker_cli='/usr/bin/docker'): except subprocess.CalledProcessError as e: if "client and server don't have same version" in e.output \ or "client is newer than server" in e.output: - print("\nThe docker version in this Atomic App differs " - "greatly from the host version.\nPlease use a different " - "Atomic App version for this host.\n") - raise e + raise DockerException("\nThe docker version in this " + "Atomic App differs greatly from " + "the host version.\nPlease use a " + "different Atomic App version for " + "this host.\n") + elif "Is your docker daemon up and running" in e.output or \ + "Are you trying to connect to a TLS-enabled daemon " \ + "without TLS" in e.output: + raise DockerException("Could not connect to the " + "docker daemon.") + else: + raise DockerException(e.output) def pull(self, image, update=False): """ @@ -45,65 +74,104 @@ def pull(self, image, update=False): None """ if not self.is_image_present(image) or update: - logger.info('Pulling Docker image: %s' % image) + logger.info('Pulling docker image: %s' % image) + cockpit_logger.info('Pulling docker image: %s' % image) pull_cmd = [self.docker_cli, 'pull', image] logger.debug(' '.join(pull_cmd)) - if not self.dryrun: - subprocess.call(pull_cmd) else: - logger.info('Skipping pulling Docker image: %s' % image) + logger.info('Skipping pulling docker image: %s' % image) + return + + if self.dryrun: + logger.info("DRY-RUN: %s", pull_cmd) + return + + try: + subprocess.check_output(pull_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise DockerException("Could not pull docker image: %s.\n%s" % (image, e.output)) - def extract(self, image, source, dest, update=False): + cockpit_logger.info('Skipping pulling docker image: %s' % image) + + def extract_files(self, image, source, dest): """ - Extracts content from a directory in a Docker image to specified + Extracts a directory/file in a Docker image to a specified destination. 
Args: image (str): Docker image name source (str): Source directory in Docker image to copy from dest (str): Path to destination directory on host - update (bool): Update destination directory if it exists when - True Returns: None """ logger.info( - 'Extracting nulecule data from image: %s to %s' % (image, dest)) + 'Copying files from image %s:%s to %s' % (image, source, dest)) if self.dryrun: return - # Create dummy container + # Create a dummy container in order to retrieve the file(s) run_cmd = [ self.docker_cli, 'create', '--entrypoint', '/bin/true', image] logger.debug('Creating docker container: %s' % ' '.join(run_cmd)) container_id = subprocess.check_output(run_cmd).strip() - # Copy files out of dummy container to tmpdir - tmpdir = '/tmp/nulecule-{}'.format(uuid.uuid1()) + # Copy files out of dummy container to the destination directory cp_cmd = [self.docker_cli, 'cp', - '%s:/%s' % (container_id, source), - tmpdir] + '%s:/%s' % (container_id, source), dest] logger.debug( - 'Copying data from Docker container: %s' % ' '.join(cp_cmd)) - subprocess.call(cp_cmd) + 'Copying data from docker container: %s' % ' '.join(cp_cmd)) + try: + subprocess.check_output(cp_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise DockerException('Copying data from docker container failed: %s. \n%s' % (cp_cmd, e.output)) + + # Clean up dummy container + rm_cmd = [self.docker_cli, 'rm', '-f', container_id] + logger.debug('Removing docker container: %s' % ' '.join(rm_cmd)) + try: + subprocess.check_output(rm_cmd) + except subprocess.CalledProcessError as e: + raise DockerException('Removing docker container failed: %s. \n%s' % (rm_cmd, e.output)) - # There has been some inconsistent behavior where docker cp - # will either copy out the entire dir /APP_ENT_PATH/*files* or - # it will copy out just /*files* without APP_ENT_PATH. Detect - # that here and adjust accordingly. - src = os.path.join(tmpdir, APP_ENT_PATH) - if not os.path.exists(src): - src = tmpdir + # Set the proper permissions on the extracted folder + Utils.setFileOwnerGroup(dest) + + def extract_nulecule_data(self, image, source, dest, update=False): + """ + Extract the Nulecule contents from a container into a destination + directory. + + Args: + image (str): Docker image name + source (str): Source directory in Docker image to copy from + dest (str): Path to destination directory on host + update (bool): Update destination directory if it exists when + True + + Returns: + None + """ + logger.info( + 'Extracting Nulecule data from image %s to %s' % (image, dest)) + if self.dryrun: + return + + # Create a temporary directory for extraction + tmpdir = '/tmp/nulecule-{}'.format(uuid.uuid1()) + + self.extract_files(image, source=source, dest=tmpdir) # If the application already exists locally then need to # make sure the local app id is the same as the one requested # on the command line. 
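# Editor's note: a condensed sketch of the create/cp/rm sequence that
# extract_files() above runs to copy files out of an image without starting
# it; the image name and paths are examples only.
import subprocess

image, src, dest = 'projectatomic/nulecule-library', '/index.yaml', '/tmp/out'
cid = subprocess.check_output(
    ['docker', 'create', '--entrypoint', '/bin/true', image]).strip()
subprocess.check_output(['docker', 'cp', '%s:%s' % (cid, src), dest],
                        stderr=subprocess.STDOUT)
subprocess.check_output(['docker', 'rm', '-f', cid])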
mainfile = os.path.join(dest, MAIN_FILE) - tmpmainfile = os.path.join(src, MAIN_FILE) + tmpmainfile = os.path.join(tmpdir, MAIN_FILE) if os.path.exists(mainfile): existing_id = Utils.getAppId(mainfile) new_id = Utils.getAppId(tmpmainfile) + cockpit_logger.info("Loading app_id %s" % new_id) if existing_id != new_id: raise NuleculeException( "Existing app (%s) and requested app (%s) differ" % @@ -115,16 +183,16 @@ def extract(self, image, source, dest, update=False): logger.info("App exists locally and no update requested") return - # Copy files - logger.debug('Copying nulecule data from %s to %s' % (src, dest)) - Utils.copy_dir(src, dest, update) + # Copy files from tmpdir into place + logger.debug('Copying nulecule data from %s to %s' % (tmpdir, dest)) + Utils.copy_dir(tmpdir, dest, update) + + # Clean up tmpdir logger.debug('Removing tmp dir: %s' % tmpdir) Utils.rm_dir(tmpdir) - # Clean up dummy container - rm_cmd = [self.docker_cli, 'rm', '-f', container_id] - logger.debug('Removing Docker container: %s' % ' '.join(rm_cmd)) - subprocess.call(rm_cmd) + # Set the proper permissions on the extracted folder + Utils.setFileOwnerGroup(dest) def is_image_present(self, image): """ diff --git a/atomicapp/nulecule/exceptions.py b/atomicapp/nulecule/exceptions.py index ca2dd159..02bf391f 100644 --- a/atomicapp/nulecule/exceptions.py +++ b/atomicapp/nulecule/exceptions.py @@ -1,4 +1,26 @@ # -*- coding: utf-8 -*- +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see <http://www.gnu.org/licenses/>.
+""" + + +class DockerException(Exception): + pass class NuleculeException(Exception): diff --git a/atomicapp/nulecule/external/templates/nulecule/Dockerfile.tpl b/atomicapp/nulecule/external/templates/nulecule/Dockerfile.tpl new file mode 100644 index 00000000..6fff6d69 --- /dev/null +++ b/atomicapp/nulecule/external/templates/nulecule/Dockerfile.tpl @@ -0,0 +1,9 @@ +FROM projectatomic/atomicapp:${atomicapp_version} + +MAINTAINER Your Name + +LABEL io.projectatomic.nulecule.providers="kubernetes,docker,marathon" \ + io.projectatomic.nulecule.specversion="${nulecule_spec_version}" + +ADD /Nulecule /Dockerfile README.md /application-entity/ +ADD /artifacts /application-entity/artifacts diff --git a/atomicapp/nulecule/external/templates/nulecule/Nulecule.tpl b/atomicapp/nulecule/external/templates/nulecule/Nulecule.tpl new file mode 100644 index 00000000..ecfa73e7 --- /dev/null +++ b/atomicapp/nulecule/external/templates/nulecule/Nulecule.tpl @@ -0,0 +1,21 @@ +--- +specversion: ${nulecule_spec_version} +id: ${app_name} + +metadata: + name: ${app_name} + appversion: ${app_version} + description: ${app_desc} + +graph: + - name: ${app_name} + params: + - name: image + description: Container image to use + default: centos/httpd + artifacts: + kubernetes: + - file://artifacts/kubernetes/${app_name}_pod.yaml + - file://artifacts/kubernetes/${app_name}_service.yaml + docker: + - file://artifacts/docker/${app_name}_run diff --git a/atomicapp/nulecule/external/templates/nulecule/README.md.tpl b/atomicapp/nulecule/external/templates/nulecule/README.md.tpl new file mode 100644 index 00000000..e9b5cfff --- /dev/null +++ b/atomicapp/nulecule/external/templates/nulecule/README.md.tpl @@ -0,0 +1,3 @@ +# $app_name Atomic App + +My awesome Atomic App. diff --git a/atomicapp/nulecule/external/templates/nulecule/answers.conf.sample.tpl b/atomicapp/nulecule/external/templates/nulecule/answers.conf.sample.tpl new file mode 100644 index 00000000..158081b2 --- /dev/null +++ b/atomicapp/nulecule/external/templates/nulecule/answers.conf.sample.tpl @@ -0,0 +1,7 @@ +[general] +namespace = default +provider = kubernetes + +[${app_name}] +image = centos/httpd + diff --git a/atomicapp/nulecule/external/templates/nulecule/artifacts/docker/run.tpl b/atomicapp/nulecule/external/templates/nulecule/artifacts/docker/run.tpl new file mode 100644 index 00000000..31f94adb --- /dev/null +++ b/atomicapp/nulecule/external/templates/nulecule/artifacts/docker/run.tpl @@ -0,0 +1 @@ +docker run -d --name $app_name -p 80:80 $image diff --git a/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl b/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl new file mode 100644 index 00000000..2fdd065b --- /dev/null +++ b/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: $app_name + labels: + name: $app_name + +spec: + containers: + - name: $app_name + image: $image diff --git a/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl b/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl new file mode 100644 index 00000000..da008db8 --- /dev/null +++ b/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: $app_name + labels: + name: $app_name +spec: + ports: + - port: 80 + targetPort: 80 + selector: + name: $app_name diff --git 
a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index 141a12d2..6157bda5 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -1,10 +1,34 @@ # -*- coding: utf-8 -*- +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see <http://www.gnu.org/licenses/>. +""" +import logging + from atomicapp.constants import (GLOBAL_CONF, + LOGGER_COCKPIT, NAME_KEY, DEFAULTNAME_KEY, - PROVIDER_KEY) + PROVIDERS) from atomicapp.utils import Utils from atomicapp.plugin import Plugin +from atomicapp.nulecule.exceptions import NuleculeException + +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) class NuleculeBase(object): @@ -16,7 +40,6 @@ class NuleculeBase(object): def __init__(self, basepath, params, namespace): self.plugin = Plugin() - self.plugin.load_plugins() self.basepath = basepath self.params = params or [] self.namespace = namespace @@ -39,46 +62,20 @@ def load_config(self, config, ask=False, skip_asking=False): Returns: None """ - for param in self.params: - value = config.get(self.namespace, {}).get(param[NAME_KEY]) or \ - config.get(GLOBAL_CONF, {}).get(param[NAME_KEY]) - if value is None and (ask or ( - not skip_asking and param.get(DEFAULTNAME_KEY) is None)): - value = Utils.askFor(param[NAME_KEY], param) - elif value is None: - value = param.get(DEFAULTNAME_KEY) - if config.get(self.namespace) is None: - config[self.namespace] = {} - config[self.namespace][param[NAME_KEY]] = value self.config = config - - def merge_config(self, to_config, from_config): - """ - Merge values from from_config to to_config. If value for a key - in a group in to_config is missing, then only set it's value from - corresponding key in the same group in from_config. - - Args: - to_config (dict): Dictionary to merge config into - from_config (dict): Dictionary to merge config from - - Returns: - None - """ - for group, group_vars in from_config.items(): - to_config[group] = to_config.get(group) or {} - for key, value in (group_vars or {}).items(): - if to_config[group].get(key) is None: - to_config[group][key] = value - - def get_context(self): - """ - Get context data from config data for rendering an artifact. - """ - context = {} - context.update(self.config.get(GLOBAL_CONF) or {}) - context.update(self.config.get(self.namespace) or {}) - return context + for param in self.params: + value = config.get(param[NAME_KEY], scope=self.namespace, ignore_sources=['defaults']) + if value is None: + if ask or (not skip_asking and + param.get(DEFAULTNAME_KEY) is None): + cockpit_logger.info( + "%s is missing in answers.conf."
% param[NAME_KEY]) + value = config.get(param[NAME_KEY], scope=self.namespace) \ + or Utils.askFor(param[NAME_KEY], param, self.namespace) + else: + value = param.get(DEFAULTNAME_KEY) + config.set(param[NAME_KEY], value, source='runtime', + scope=self.namespace) def get_provider(self, provider_key=None, dry=False): """ @@ -93,10 +90,15 @@ def get_provider(self, provider_key=None, dry=False): """ # If provider_key isn't provided via CLI, let's grab it from the configuration if provider_key is None: - provider_key = self.config.get(GLOBAL_CONF)[PROVIDER_KEY] + provider_key = self.config.get('provider', scope=GLOBAL_CONF) provider_class = self.plugin.getProvider(provider_key) + if provider_class is None: + raise NuleculeException("Invalid Provider - '{}', provided in " + "answers.conf (choose from {})" + .format(provider_key, ', ' + .join(PROVIDERS))) return provider_key, provider_class( - self.get_context(), self.basepath, dry) + self.config.context(), self.basepath, dry) def run(self, provider_key=None, dry=False): raise NotImplementedError @@ -104,7 +106,7 @@ def run(self, provider_key=None, dry=False): def stop(self, provider): raise NotImplementedError - def install(self): + def fetch(self): raise NotImplementedError def uninstall(self): diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 01f17b39..cf3e4719 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -1,35 +1,58 @@ # -*- coding: utf-8 -*- +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see <http://www.gnu.org/licenses/>. +""" import anymarkup -import copy import distutils.dir_util import logging import os import tempfile +import urlparse +import urllib +from string import Template -from atomicapp.constants import (GLOBAL_CONF, - ANSWERS_FILE_SAMPLE_FORMAT, +from atomicapp.constants import (ANSWERS_FILE_SAMPLE_FORMAT, ANSWERS_FILE, ANSWERS_FILE_SAMPLE, ANSWERS_RUNTIME_FILE, - DEFAULT_ANSWERS, - DEFAULT_NAMESPACE, + LOGGER_COCKPIT, + LOGGER_DEFAULT, MAIN_FILE, - NAMESPACE_KEY, - PROVIDER_KEY) + __ATOMICAPPVERSION__, + __NULECULESPECVERSION__) from atomicapp.nulecule.base import Nulecule from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.config import Config from atomicapp.utils import Utils -logger = logging.getLogger(__name__) +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) +logger = logging.getLogger(LOGGER_DEFAULT) class NuleculeManager(object): """ - Interface to install, run, stop a Nulecule application. + Interface to fetch, run, stop a Nulecule application. """ - def __init__(self, app_spec, destination=None, answers_file=None): + def __init__(self, app_spec, destination=None, + cli_answers=None, answers_file=None, + answers_format=None): """ init function for NuleculeManager. Sets a few instance variables.
@@ -37,24 +60,22 @@ def __init__(self, app_spec, destination=None, answers_file=None): app_spec: either a path to an unpacked nulecule app or a container image name where a nulecule can be found destination: where to unpack a nulecule to if it isn't local + cli_answers: some answer file values provided from cli args + answers_file: the location of the answers file + answers_format (str): File format for writing sample answers file """ - # Let's pass in a default format for our answers - self.answers = copy.deepcopy(DEFAULT_ANSWERS) - self.answers_format = None + self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT self.answers_file = None # The path to an answer file self.app_path = None # The path where the app resides or will reside self.image = None # The container image to pull the app from # Adjust app_spec, destination, and answer file paths if absolute. if os.path.isabs(app_spec): - app_spec = os.path.join(Utils.getRoot(), - app_spec.lstrip('/')) + app_spec = Utils.get_real_abspath(app_spec) if destination and os.path.isabs(destination): - destination = os.path.join(Utils.getRoot(), - destination.lstrip('/')) + destination = Utils.get_real_abspath(destination) if answers_file and os.path.isabs(answers_file): - answers_file = os.path.join(Utils.getRoot(), - answers_file.lstrip('/')) + answers_file = Utils.get_real_abspath(answers_file) # If the user doesn't want the files copied to a permanent # location then he provides 'none'. If that is the case we'll @@ -83,31 +104,89 @@ def __init__(self, app_spec, destination=None, answers_file=None): self.app_path = Utils.getNewAppCacheDir(self.image) logger.debug("NuleculeManager init app_path: %s", self.app_path) - logger.debug("NuleculeManager init image: %s", self.image) + logger.debug("NuleculeManager init image: %s", self.image) + + # Create the app_path if it doesn't exist yet + if not os.path.isdir(self.app_path): + os.makedirs(self.app_path) # Set where the main nulecule file should be self.main_file = os.path.join(self.app_path, MAIN_FILE) - # If user provided a path to answers then make sure it exists. If they - # didn't provide one then use the one in the app dir if it exists. - if answers_file: - self.answers_file = answers_file - if not os.path.isfile(self.answers_file): - raise NuleculeException( - "Path for answers doesn't exist: %s" % self.answers_file) - else: - if os.path.isfile(os.path.join(self.app_path, ANSWERS_FILE)): - self.answers_file = os.path.join(self.app_path, ANSWERS_FILE) - - # TODO: put this in a better place in the future. - # If we are running inside of an openshift pod then override - # some of the config by detecting some values from the environment - if Utils.running_on_openshift(): - self.answers[GLOBAL_CONF]['provider'] = 'openshift' - self.answers[GLOBAL_CONF]['accesstoken'] = os.environ['TOKEN_ENV_VAR'] - self.answers[GLOBAL_CONF]['namespace'] = os.environ['POD_NAMESPACE'] - self.answers[GLOBAL_CONF]['providerapi'] = \ - Utils.get_openshift_api_endpoint_from_env() + # Process answers. 
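The constructor above now hands all answer handling to the new `Config` object, seeded with any CLI overrides. A minimal sketch of the layered lookup this implies, using the source names visible in this diff ('cli', 'runtime', 'answers', 'defaults'); the precedence order shown is an assumption, not the shipped implementation:

```python
# Illustrative sketch only -- not the actual atomicapp.nulecule.config.Config.
class LayeredConfig(object):
    # Highest-priority source first; the exact ordering is assumed here.
    PRECEDENCE = ['cli', 'runtime', 'answers', 'defaults']

    def __init__(self, cli=None):
        self.sources = dict((name, {}) for name in self.PRECEDENCE)
        self.sources['cli'] = cli or {}

    def update_source(self, source, data):
        # Mirrors config.update_source(source='answers', data=answers) seen below
        self.sources[source] = data or {}

    def get(self, key, ignore_sources=()):
        # The first source that knows the key wins
        for name in self.PRECEDENCE:
            if name not in ignore_sources and key in self.sources[name]:
                return self.sources[name][key]
        return None
```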
+ self.answers_file = answers_file + self.config = Config(cli=cli_answers) + + @staticmethod + def init(app_name, destination=None, app_version='1.0', + app_desc='App description'): + """Initialize a new Nulecule app + + Args: + app_name (str): Application name + destination (str): Destination path + app_version (str): Application version + app_desc (str): Application description + + Returns: + destination (str) + """ + + # context to render template files for Atomic App + context = dict( + app_name=app_name, + app_version=app_version, + app_desc=app_desc, + atomicapp_version=__ATOMICAPPVERSION__, + nulecule_spec_version=__NULECULESPECVERSION__ + ) + + if destination is None: + destination = os.path.join('.', app_name) + + # Check if destination directory exists and is not empty + if os.path.exists(destination) and \ + os.path.isdir(destination) and os.listdir(destination): + value = raw_input('Destination directory is not empty! ' + 'Do you still want to proceed? [Y]/n: ') + value = value or 'y' + if value.lower() != 'y': + return # Exit out as the user has chosen not to proceed + + # Temporary working dir to render the templates + tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') + template_dir = os.path.join(os.path.dirname(__file__), + 'external/templates/nulecule') + + try: + # Copy template dir to temporary working directory and render templates + distutils.dir_util.copy_tree(template_dir, tmpdir) + for item in os.walk(tmpdir): + parent_dir, dirs, files = item + for filename in files: + if not filename.endswith('.tpl'): + continue + templ_path = os.path.join(parent_dir, filename) + if parent_dir.endswith('artifacts/docker') or \ + parent_dir.endswith('artifacts/kubernetes'): + file_path = os.path.join( + parent_dir, + '{}_{}'.format(app_name, filename[:-4])) + else: + file_path = os.path.join(parent_dir, filename[:-4]) + with open(templ_path) as f: + s = f.read() + t = Template(s) + with open(file_path, 'w') as f: + f.write(t.safe_substitute(**context)) + os.remove(templ_path) + + # Copy rendered templates to destination directory + distutils.dir_util.copy_tree(tmpdir, destination, True) + finally: + # Remove temporary working directory + distutils.dir_util.remove_tree(tmpdir) + return destination def unpack(self, update=False, dryrun=False, nodeps=False, config=None): @@ -138,20 +217,18 @@ def unpack(self, update=False, return Nulecule.load_from_path( self.app_path, dryrun=dryrun, config=config) - def genanswers(self, dryrun=False, answers_format=None, **kwargs): + def genanswers(self, dryrun=False, **kwargs): """ Renders artifacts and then generates an answer file. Finally copies answer file to the current working directory. 
Args: dryrun (bool): Do not make any change to the host system if True - answers_format (str): File format for writing sample answers file kwargs (dict): Extra keyword arguments Returns: None """ - self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT # Check to make sure an answers.conf file doesn't exist already answers_file = os.path.join(os.getcwd(), ANSWERS_FILE) @@ -160,21 +237,18 @@ def genanswers(self, dryrun=False, answers_format=None, **kwargs): "Can't generate answers.conf over existing file") # Call unpack to get the app code - self.nulecule = self.unpack(update=False, dryrun=dryrun, config=self.answers) + self.nulecule = self.unpack(update=False, dryrun=dryrun, config=self.config) - self.nulecule.load_config(config=self.nulecule.config, - skip_asking=True) + self.nulecule.load_config(skip_asking=True) # Get answers and write them out to answers.conf in cwd answers = self._get_runtime_answers( self.nulecule.config, None) - self._write_answers(answers_file, answers, answers_format) + self._write_answers(answers_file, answers, self.answers_format) - def install(self, nodeps=False, update=False, dryrun=False, - answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): + def fetch(self, nodeps=False, update=False, dryrun=False, **kwargs): """ Installs (unpacks) a Nulecule application from a Nulecule image to a target path. - Args: answers (dict or str): Answers data or local path to answers file nodeps (bool): Install the nulecule application without installing @@ -182,69 +256,55 @@ def install(self, nodeps=False, update=False, dryrun=False, update (bool): Pull requisite Nulecule image and install or update already installed Nulecule application dryrun (bool): Do not make any change to the host system if True - answers_format (str): File format for writing sample answers file kwargs (dict): Extra keyword arguments - Returns: None """ - if self.answers_file: - self.answers = Utils.loadAnswers(self.answers_file) - self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT - # Call unpack. If the app doesn't exist it will be pulled. If # it does exist it will be just be loaded and returned - self.nulecule = self.unpack(update, dryrun, config=self.answers) + self.nulecule = self.unpack(update, dryrun, config=self.config) - self.nulecule.load_config(config=self.nulecule.config, - skip_asking=True) + self.nulecule.load_config(skip_asking=True) runtime_answers = self._get_runtime_answers( self.nulecule.config, None) # write sample answers file self._write_answers( os.path.join(self.app_path, ANSWERS_FILE_SAMPLE), - runtime_answers, answers_format) + runtime_answers, self.answers_format) + + cockpit_logger.info("Install Successful.") - def run(self, cli_provider, answers_output, ask, - answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): + def run(self, answers_output, ask, **kwargs): """ Runs a Nulecule application from a local path or a Nulecule image name. Args: answers (dict or str): Answers data or local path to answers file - cli_provider (str): Provider to use to run the Nulecule - application answers_output (str): Path to file to export runtime answers data to ask (bool): Ask for values for params with default values from user, if True - answers_format (str): File format for writing sample answers file kwargs (dict): Extra keyword arguments Returns: None """ - if self.answers_file: - self.answers = Utils.loadAnswers(self.answers_file) - self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT dryrun = kwargs.get('dryrun') or False # Call unpack. 
If the app doesn't exist it will be pulled. If # it does exist it will be just be loaded and returned - self.nulecule = self.unpack(dryrun=dryrun, config=self.answers) + self.nulecule = self.unpack(dryrun=dryrun, config=self.config) - # Unless otherwise specified with CLI arguments we will - # default to the first provider available - providers = Utils.getSupportedProviders(self.app_path) - if cli_provider is None and len(providers) == 1: - self.answers[GLOBAL_CONF][PROVIDER_KEY] = providers[0] + # Process answers file + self._process_answers() - self.nulecule.load_config(config=self.nulecule.config, ask=ask) - self.nulecule.render(cli_provider, dryrun) - self.nulecule.run(cli_provider, dryrun) + self.nulecule.load_config(ask=ask) + provider = self.nulecule.config.get('provider') + self.nulecule.render(provider, dryrun) + self.nulecule.run(provider, dryrun) runtime_answers = self._get_runtime_answers( - self.nulecule.config, cli_provider) + self.nulecule.config, provider) self._write_answers( os.path.join(self.app_path, ANSWERS_RUNTIME_FILE), runtime_answers, self.answers_format) @@ -252,29 +312,24 @@ def run(self, cli_provider, answers_output, ask, self._write_answers(answers_output, runtime_answers, self.answers_format) - def stop(self, cli_provider, **kwargs): + def stop(self, **kwargs): """ Stops a running Nulecule application. Args: - cli_provider (str): Provider running the Nulecule application kwargs (dict): Extra keyword arguments """ # For stop we use the generated answer file from the run - self.answers = Utils.loadAnswers( - os.path.join(self.app_path, ANSWERS_RUNTIME_FILE)) + self.answers_file = os.path.join(self.app_path, ANSWERS_RUNTIME_FILE) + self._process_answers() dryrun = kwargs.get('dryrun') or False self.nulecule = Nulecule.load_from_path( - self.app_path, config=self.answers, dryrun=dryrun) - self.nulecule.load_config(config=self.answers) - self.nulecule.render(cli_provider, dryrun=dryrun) - self.nulecule.stop(cli_provider, dryrun) - - def uninstall(self): - # For future use - self.stop() - self.nulecule.uninstall() + self.app_path, config=self.config, dryrun=dryrun) + self.nulecule.load_config() + self.nulecule.render(self.nulecule.config.get('provider'), + dryrun=dryrun) + self.nulecule.stop(self.nulecule.config.get('provider'), dryrun) def clean(self, force=False): # For future use @@ -282,6 +337,51 @@ def clean(self, force=False): distutils.dir_util.remove_tree(self.unpack_path) self.initialize() + def _process_answers(self): + """ + Processes answer files to load data from them and then merges + any cli provided answers into the config. + + NOTE: This function should be called once on startup and then + once more after the application has been extracted, but only + if answers file wasn't found on the first invocation. The idea + is to allow for people to embed an answers file in the application + if they want, which won't be available until after extraction. + + Returns: + None + """ + answers = None + app_path_answers = os.path.join(self.app_path, ANSWERS_FILE) + + # If the user didn't provide an answers file then check the app + # dir to see if one exists. 
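The answers-file handling just below treats any value with a URL scheme as remote; `urlparse` returns an empty scheme for plain filesystem paths, for example:

```python
import urlparse  # Python 2, as used throughout this codebase

urlparse.urlparse('/tmp/answers.conf').scheme                # ''
urlparse.urlparse('http://example.com/answers.conf').scheme  # 'http'
```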
+ if not self.answers_file: + if os.path.isfile(app_path_answers): + self.answers_file = app_path_answers + + # At this point if we have an answers file, load it + if self.answers_file: + + # If this is a url then download answers file to app directory + if urlparse.urlparse(self.answers_file).scheme != "": + logger.debug("Retrieving answers file from: {}" + .format(self.answers_file)) + with open(app_path_answers, 'w+') as f: + stream = urllib.urlopen(self.answers_file) + f.write(stream.read()) + self.answers_file = app_path_answers + + # Check to make sure the file exists + if not os.path.isfile(self.answers_file): + raise NuleculeException( + "Provided answers file doesn't exist: {}".format(self.answers_file)) + + # Load answers + answers = Utils.loadAnswers(self.answers_file, self.answers_format) + + self.config.update_source(source='answers', data=answers) + def _write_answers(self, path, answers, answers_format): """ Write answers data to file. @@ -297,8 +397,16 @@ def _write_answers(self, path, answers, answers_format): logger.debug("Writing answers to file.") logger.debug("FILE: %s", path) logger.debug("ANSWERS: %s", answers) + logger.debug("ANSWERS FORMAT: %s", answers_format) anymarkup.serialize_file(answers, path, format=answers_format) + # Make sure that the permission of the file is set to the current user + Utils.setFileOwnerGroup(path) + + # TODO - once we rework config data we shouldn't need this + # function anymore, we should be able to take the data + # straight from the config object since the defaults and args + # provided from the cli would have already been merged. def _get_runtime_answers(self, config, cli_provider): """ Get runtime answers data from config (Nulecule config) by adding @@ -311,12 +419,4 @@ def _get_runtime_answers(self, config, cli_provider): Returns: dict """ - _config = copy.deepcopy(config) - _config[GLOBAL_CONF] = config.get(GLOBAL_CONF) or {} - _config[GLOBAL_CONF][NAMESPACE_KEY] = _config[GLOBAL_CONF].get( - NAMESPACE_KEY) or DEFAULT_NAMESPACE - # If a provider is provided via CLI, override the config parameter - if cli_provider: - _config[GLOBAL_CONF][PROVIDER_KEY] = cli_provider - - return _config + return self.nulecule.config.runtime_answers() diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index 0a245df9..2b13b498 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. 
@@ -22,13 +22,14 @@ from __future__ import print_function import os -import imp - import logging +import importlib from utils import Utils -from constants import HOST_DIR, PROVIDER_CONFIG_KEY +from constants import (HOST_DIR, + LOGGER_DEFAULT, + PROVIDER_CONFIG_KEY) -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class Provider(object): @@ -39,7 +40,9 @@ class Provider(object): dryrun = None container = False config_file = None - __artifacts = None + + # By default, no artifacts are loaded + __artifacts = [] @property def artifacts(self): @@ -72,10 +75,10 @@ def getConfigFile(self): """ if PROVIDER_CONFIG_KEY in self.config: self.config_file = self.config[PROVIDER_CONFIG_KEY] - if self.container: - self.config_file = os.path.join(Utils.getRoot(), self.config_file.lstrip("/")) + if os.path.isabs(self.config_file): + self.config_file = Utils.get_real_abspath(self.config_file) else: - logger.warning("Configuration option '%s' not found" % PROVIDER_CONFIG_KEY) + logger.debug("Configuration option '%s' not provided" % PROVIDER_CONFIG_KEY) def checkConfigFile(self): if not self.config_file: @@ -114,8 +117,8 @@ def __repr__(self): class ProviderFailedException(Exception): - """Error during provider execution""" + pass class Plugin(object): @@ -124,42 +127,11 @@ class Plugin(object): def __init__(self, ): pass - def load_plugins(self): - run_path = os.path.dirname(os.path.realpath(__file__)) - providers_dir = os.path.join(run_path, "providers") - logger.debug("Loading providers from %s", providers_dir) - - plugin_classes = {} - plugin_class = globals()["Provider"] - - for f in os.listdir(providers_dir): - if f.endswith(".py"): - module_name = os.path.basename(f).rsplit('.', 1)[0] - try: - f_module = imp.load_source( - module_name, os.path.join(providers_dir, f)) - except (IOError, OSError, ImportError) as ex: - logger.warning("can't load module '%s': %s", f, repr(ex)) - continue - - for name in dir(f_module): - binding = getattr(f_module, name, None) - try: - # if you try to compare binding and PostBuildPlugin, python won't match them if you call - # this script directly b/c: - # ! <= - # but - # <= - is_sub = issubclass(binding, plugin_class) - except TypeError: - is_sub = False - if binding and is_sub and plugin_class.__name__ != binding.__name__: - plugin_classes[binding.key] = binding - - self.plugins = plugin_classes - def getProvider(self, provider_key): - for key, provider in self.plugins.iteritems(): - if key == provider_key: - logger.debug("Found provider %s", provider) - return provider + try: + module = importlib.import_module("atomicapp.providers.%s" % provider_key) + provider_class = "%sProvider" % provider_key.capitalize() + provider = getattr(module, provider_class) + except ImportError: + provider = None + return provider diff --git a/atomicapp/providers/__init__.py b/atomicapp/providers/__init__.py index e69de29b..4989594f 100644 --- a/atomicapp/providers/__init__.py +++ b/atomicapp/providers/__init__.py @@ -0,0 +1,18 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see <http://www.gnu.org/licenses/>. +""" diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index b7d7c484..af6615c3 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -21,11 +21,14 @@ import subprocess import re import logging -from atomicapp.constants import DEFAULT_CONTAINER_NAME, DEFAULT_NAMESPACE +from atomicapp.constants import (DEFAULT_CONTAINER_NAME, + DEFAULT_NAMESPACE, + LOGGER_DEFAULT) from atomicapp.plugin import Provider, ProviderFailedException from atomicapp.utils import Utils +from atomicapp.nulecule.exceptions import DockerException -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class DockerProvider(Provider): @@ -70,14 +73,14 @@ def init(self): raise ProviderFailedException(msg) def _get_containers(self): - docker_cmd = 'docker inspect --format="{{ .Name }}" $(docker ps -aq --no-trunc) | sed "s,/,,g"' + docker_cmd = 'docker ps -a --format="{{ .Names }}"' if self.dryrun: logger.info("DRY-RUN: %s", docker_cmd) return [] else: return dict((line, 1) for line in subprocess.check_output(docker_cmd, shell=True).splitlines()) - def deploy(self): + def run(self): logger.info("Deploying to provider: Docker") for container in self._get_containers(): if re.match("%s_+%s+_+[a-zA-Z0-9]{12}" % (self.namespace, self.image), container): @@ -88,12 +91,14 @@ def deploy(self): label_run = None with open(artifact_path, "r") as fp: label_run = fp.read().strip() + # if docker-run provided as multiline command + label_run = ' '.join(label_run.split('\\\n')) run_args = label_run.split() # If --name is provided, do not re-name due to potential linking of containers. Warn user instead. # Else use namespace provided within answers.conf if '--name' in run_args: - logger.info("WARNING: Using --name provided within artifact file.") + logger.warning("WARNING: Using --name provided within artifact file.") else: run_args.insert(run_args.index('run') + 1, "--name=%s_%s_%s" % (self.namespace, self.image, Utils.getUniqueUUID())) @@ -101,9 +106,12 @@ def deploy(self): if self.dryrun: logger.info("DRY-RUN: %s", " ".join(cmd)) else: - subprocess.check_call(cmd) + try: + subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + raise DockerException("%s. \n%s" % (cmd, e.output)) - def undeploy(self): + def stop(self): logger.info("Undeploying from provider: Docker") artifact_names = list() @@ -113,6 +121,12 @@ def undeploy(self): label_run = None with open(artifact_path, "r") as fp: label_run = fp.read().strip() + + # If user specified a name of the container via --name=NAME, + # then remove the equals sign since it breaks our later processing + label_run = label_run.replace('--name=', '--name ') + + # Convert to list for processing run_args = label_run.split() # If any artifacts are labelled by name, add it to a container dict list @@ -132,4 +146,7 @@ def undeploy(self): if self.dryrun: logger.info("DRY-RUN: STOPPING CONTAINER %s", " ".join(cmd)) else: - subprocess.check_call(cmd) + try: + subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + raise DockerException("STOPPING CONTAINER failed: %s.
\n%s" % (cmd, e.output)) diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index 41757249..a2109ba8 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -20,12 +20,23 @@ import anymarkup import logging import os -from string import Template +from atomicapp.constants import (PROVIDER_AUTH_KEY, + ANSWERS_FILE, + DEFAULT_NAMESPACE, + LOGGER_DEFAULT, + PROVIDER_API_KEY, + PROVIDER_CA_KEY, + PROVIDER_TLS_VERIFY_KEY, + LOGGER_COCKPIT, + K8S_DEFAULT_API) from atomicapp.plugin import Provider, ProviderFailedException -from atomicapp.utils import printErrorStatus, Utils -logger = logging.getLogger(__name__) +from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig +from atomicapp.providers.lib.kubeshift.client import Client +from atomicapp.utils import Utils +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) +logger = logging.getLogger(LOGGER_DEFAULT) class KubernetesProvider(Provider): @@ -34,221 +45,249 @@ class KubernetesProvider(Provider): This class implements deploy, stop and undeploy of an atomicapp on Kubernetes provider. """ + + # Class variables key = "kubernetes" + namespace = DEFAULT_NAMESPACE + k8s_artifacts = {} + + # From the provider configuration config_file = None - kubectl = None - def init(self): - self.namespace = "default" + # Essential provider parameters + provider_api = None + provider_auth = None + provider_tls_verify = None + provider_ca = None - self.k8s_manifests = [] + def init(self): + self.k8s_artifacts = {} logger.debug("Given config: %s", self.config) if self.config.get("namespace"): self.namespace = self.config.get("namespace") logger.info("Using namespace %s", self.namespace) - if self.container: - self.kubectl = self._find_kubectl(Utils.getRoot()) - kube_conf_path = "/etc/kubernetes" - host_kube_conf_path = os.path.join(Utils.getRoot(), kube_conf_path.lstrip("/")) - if not os.path.exists(kube_conf_path) and os.path.exists(host_kube_conf_path): - if self.dryrun: - logger.info("DRY-RUN: link %s from %s" % (kube_conf_path, host_kube_conf_path)) - else: - os.symlink(host_kube_conf_path, kube_conf_path) + + self._process_artifacts() + + if self.dryrun: + return + + ''' + Config_file: + If a config_file has been provided, use the configuration + from the file and load the associated generated file. + If a config_file exists (--provider-config) use that. + + Params: + If any provider specific parameters have been provided, + load the configuration through the answers.conf file + + .kube/config: + If no config file or params are provided by user then try to find and + use a config file at the default location. + + no config at all: + If no .kube/config file can be found then try to connect to the default + unauthenticated http://localhost:8080/api end-point. 
+ ''' + + default_config_loc = os.path.join( + Utils.getRoot(), Utils.getUserHome().strip('/'), '.kube/config') + + if self.config_file: + logger.debug("Provider configuration provided") + self.api = Client(KubeConfig.from_file(self.config_file), "kubernetes") + elif self._check_required_params(): + logger.debug("Generating .kube/config from given parameters") + self.api = Client(self._from_required_params(), "kubernetes") + elif os.path.isfile(default_config_loc): + logger.debug(".kube/config exists, using default configuration file") + self.api = Client(KubeConfig.from_file(default_config_loc), "kubernetes") else: - self.kubectl = self._find_kubectl() + self.config["provider-api"] = K8S_DEFAULT_API + self.api = Client(self._from_required_params(), "kubernetes") + + # Check if the namespace that the app is being deployed to is available + self._check_namespaces() + + def _build_param_dict(self): + # Initialize the values + paramdict = {PROVIDER_API_KEY: self.provider_api, + PROVIDER_AUTH_KEY: self.provider_auth, + PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify, + PROVIDER_CA_KEY: self.provider_ca} + + # Get values from the loaded answers.conf / passed CLI params + for k in paramdict.keys(): + paramdict[k] = self.config.get(k) + + return paramdict + + def _check_required_params(self, exception=False): + ''' + This checks to see if the required parameters associated with the Kubernetes + provider are passed. Only PROVIDER_API_KEY is *required*. Token may be blank. + ''' + + paramdict = self._build_param_dict() + logger.debug("List of parameters passed: %s" % paramdict) + + # Check that the required parameters are passed. If not, error out. + for k in [PROVIDER_API_KEY]: + if paramdict[k] is None: + if exception: + msg = "You need to set %s in %s or pass it as a CLI param" % (k, ANSWERS_FILE) + raise ProviderFailedException(msg) + else: + return False + + return True + + def _from_required_params(self): + ''' + Create a default configuration from passed environment parameters. + ''' + + self._check_required_params(exception=True) + paramdict = self._build_param_dict() + + logger.debug("Building from required params") + # Generate the configuration from the parameters + config = KubeConfig().from_params(api=paramdict[PROVIDER_API_KEY], + auth=paramdict[PROVIDER_AUTH_KEY], + ca=paramdict[PROVIDER_CA_KEY], + verify=paramdict[PROVIDER_TLS_VERIFY_KEY]) + logger.debug("Passed configuration for .kube/config %s" % config) + return config + + def _check_namespaces(self): + ''' + This function checks that the namespace the application is being + deployed to actually exists in the cluster. + ''' + + # Get the namespaces and output the currently used ones + namespace_list = self.api.namespaces() + logger.debug("There are currently %s namespaces in the cluster." % str(len(namespace_list))) + + # Create a namespace list + namespaces = [] + for ns in namespace_list: + namespaces.append(ns["metadata"]["name"]) + + # Output the namespaces and check to see if the one provided exists + logger.debug("Namespaces: %s" % namespaces) + if self.namespace not in namespaces: + msg = "%s namespace does not exist. Please create the namespace and try again." % self.namespace + raise ProviderFailedException(msg) - def _process_artifacts(self): + """ + Parse each Kubernetes file and convert said format into an object for + deployment.
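After processing, `k8s_artifacts` groups the parsed objects by lower-cased kind, with the provider namespace injected into each object's metadata and labels. Roughly, for the pod template shown earlier in this diff (names illustrative):

```python
# Assumed shape of self.k8s_artifacts after _process_artifacts() runs
k8s_artifacts = {
    'pod': [{
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {
            'name': 'helloapache',
            'namespace': 'default',  # forced to match the provider namespace
            'labels': {'name': 'helloapache', 'namespace': 'default'},
        },
        'spec': {'containers': [{'name': 'helloapache',
                                 'image': 'centos/httpd'}]},
    }],
}
```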
+ """ + for artifact in self.artifacts: + logger.debug("Processing artifact: %s", artifact) + data = None - # Check if Kubernetes config file is accessible, but only - # if one was provided by the user; config file is optional. - if self.config_file: - self.checkConfigFile() + # Open and parse the artifact data + with open(os.path.join(self.path, artifact), "r") as fp: + data = anymarkup.parse(fp, force_types=None) - def _find_kubectl(self, prefix=""): - """Determine the path to the kubectl program on the host. - 1) Check the config for a provider_cli in the general section - remember to add /host prefix - 2) Search /usr/bin:/usr/local/bin + # Process said artifacts + self._process_artifact_data(artifact, data) - Use the first valid value found + def _process_artifact_data(self, artifact, data): """ + Process the data for an artifact - if self.dryrun: - # Testing env does not have kubectl in it - return "/usr/bin/kubectl" + Args: + artifact (str): Artifact name + data (dict): Artifact data + """ - test_paths = ['/usr/bin/kubectl', '/usr/local/bin/kubectl'] - if self.config.get("provider_cli"): - logger.info("caller gave provider_cli: " + self.config.get("provider_cli")) - test_paths.insert(0, self.config.get("provider_cli")) + # Check if kind exists + if "kind" not in data.keys(): + raise ProviderFailedException( + "Error processing %s artifact. There is no kind" % artifact) - for path in test_paths: - test_path = prefix + path - logger.info("trying kubectl at " + test_path) - kubectl = test_path - if os.access(kubectl, os.X_OK): - logger.info("found kubectl at " + test_path) - return kubectl + # Change to lower case so it's easier to parse + kind = data["kind"].lower() - raise ProviderFailedException("No kubectl found in %s" % ":".join(test_paths)) + if kind not in self.k8s_artifacts.keys(): + self.k8s_artifacts[kind] = [] - def _call(self, cmd): - """Calls given command + # Fail if there is no metadata + if 'metadata' not in data: + raise ProviderFailedException( + "Error processing %s artifact. There is no metadata object" % artifact) - :arg cmd: Command to be called in a form of list - :raises: Exception - """ + # Change to the namespace specified on init() + data['metadata']['namespace'] = self.namespace - if self.dryrun: - logger.info("DRY-RUN: %s", " ".join(cmd)) + if 'labels' not in data['metadata']: + data['metadata']['labels'] = {'namespace': self.namespace} else: - ec, stdout, stderr = Utils.run_cmd(cmd, checkexitcode=True) - return stdout + data['metadata']['labels']['namespace'] = self.namespace - def process_k8s_artifacts(self): - """Processes Kubernetes manifests files and checks if manifest under - process is valid. 
+ self.k8s_artifacts[kind].append(data) + + ''' + This is DEPRECATED and not needed anymore as we check the /resource URL of the kubernetes api against the artifact + def _identify_api(self, artifact, data): """ - for artifact in self.artifacts: - data = None - with open(os.path.join(self.path, artifact), "r") as fp: - logger.debug(os.path.join(self.path, artifact)) - try: - data = anymarkup.parse(fp) - except Exception: - msg = "Error processing %s artifcats, Error:" % os.path.join( - self.path, artifact) - printErrorStatus(msg) - raise - if "kind" in data: - self.k8s_manifests.append((data["kind"].lower(), artifact)) - else: - apath = os.path.join(self.path, artifact) - raise ProviderFailedException("Malformed kube file: %s" % apath) - - def _resource_identity(self, path): - """Finds the Kubernetes resource name / identity from resource manifest - and raises if manifest is not supported. - - :arg path: Absolute path to Kubernetes resource manifest - - :return: str -- Resource name / identity - - :raises: ProviderFailedException + Make sure that the artifact is using the correct API + + Args: + artifact (str): Artifact name + data (dict): Artifact data """ - data = anymarkup.parse_file(path) if data["apiVersion"] == "v1": - return data["metadata"]["name"] + pass elif data["apiVersion"] in ["v1beta3", "v1beta2", "v1beta1"]: - msg = ("%s is not supported API version, update Kubernetes " + msg = ("%s is not a supported API version, update Kubernetes " "artifacts to v1 API version. Error in processing " - "%s manifest." % (data["apiVersion"], path)) + "%s manifest." % (data["apiVersion"], artifact)) raise ProviderFailedException(msg) else: - raise ProviderFailedException("Malformed kube file: %s" % path) + raise ProviderFailedException("Malformed kubernetes artifact: %s" % artifact) + ''' - def _scale_replicas(self, path, replicas=0): - """Scales replicationController to specified replicas size - - :arg path: Path to replicationController manifest - :arg replicas: Replica size to scale to. + def run(self): """ - rname = self._resource_identity(path) - cmd = [self.kubectl, "scale", "rc", rname, - "--replicas=%s" % str(replicas), - "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - - self._call(cmd) - - def deploy(self): - """Deploys the app by given resource manifests. + Deploys the app by given resource artifacts. """ logger.info("Deploying to Kubernetes") - self.process_k8s_artifacts() - - for kind, artifact in self.k8s_manifests: - if not artifact: - continue - - k8s_file = os.path.join(self.path, artifact) - cmd = [self.kubectl, "create", "-f", k8s_file, "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - self._call(cmd) + for kind, objects in self.k8s_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % (kind, artifact)) + else: + self.api.create(artifact, self.namespace) - def undeploy(self): + def stop(self): """Undeploys the app by given resource manifests. Undeploy operation first scale down the replicas to 0 and then deletes the resource from cluster. 
""" logger.info("Undeploying from Kubernetes") - self.process_k8s_artifacts() - - for kind, artifact in self.k8s_manifests: - if not artifact: - continue - path = os.path.join(self.path, artifact) - - if kind in ["ReplicationController", "rc", "replicationcontroller"]: - self._scale_replicas(path, replicas=0) - - cmd = [self.kubectl, "delete", "-f", path, "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - self._call(cmd) + for kind, objects in self.k8s_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % (kind, artifact)) + else: + self.api.delete(artifact, self.namespace) + # TODO def persistent_storage(self, graph, action): - """ - Actions are either: run, stop or uninstall as per the Requirements class - Curently run is the only function implemented for k8s persistent storage - """ - - logger.debug("Persistent storage enabled! Running action: %s" % action) - - if action not in ['run']: - logger.warning( - "%s action is not available for provider %s. Doing nothing." % - (action, self.key)) - return + pass - self._check_persistent_volumes() - - # Get the path of the persistent storage yaml file includes in /external - # Plug the information from the graph into the persistent storage file - base_path = os.path.dirname(os.path.realpath(__file__)) - template_path = os.path.join(base_path, - 'external/kubernetes/persistent_storage.yaml') - with open(template_path, 'r') as f: - content = f.read() - template = Template(content) - rendered_template = template.safe_substitute(graph) - - tmp_file = Utils.getTmpFile(rendered_template, '.yaml') - - # Pass the .yaml file and execute - if action is "run": - cmd = [self.kubectl, "create", "-f", tmp_file, "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - self._call(cmd) - os.unlink(tmp_file) - - def _check_persistent_volumes(self): - cmd = [self.kubectl, "get", "pv"] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - lines = self._call(cmd) - - # If there are no persistent volumes to claim, warn the user - if not self.dryrun and len(lines.split("\n")) == 2: - logger.warning("No persistent volumes detected in Kubernetes. Volume claim will not " - "initialize unless persistent volumes exist.") + # TODO + def _check_persistent_volumes(self, graph, action): + pass diff --git a/atomicapp/providers/lib/__init__.py b/atomicapp/providers/lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/atomicapp/providers/lib/kubeshift/__init__.py b/atomicapp/providers/lib/kubeshift/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/atomicapp/providers/lib/kubeshift/client.py b/atomicapp/providers/lib/kubeshift/client.py new file mode 100644 index 00000000..2ce47865 --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/client.py @@ -0,0 +1,61 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see <http://www.gnu.org/licenses/>. +""" + +from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient +from atomicapp.providers.lib.kubeshift.openshift import KubeOpenshiftClient +from atomicapp.providers.lib.kubeshift.exceptions import KubeClientError +from atomicapp.constants import LOGGER_DEFAULT +import logging +logger = logging.getLogger(LOGGER_DEFAULT) + + +class Client(object): + + def __init__(self, config, provider): + ''' + + Args: + config (obj): Object of the configuration data + provider (str): String value of the provider that is being used + + ''' + self.config = config + self.provider = provider + + # Choose the type of provider that is being used. Error out if it is not available + if provider == "kubernetes": + self.connection = KubeKubernetesClient(config) + logger.debug("Using Kubernetes Provider KubeClient library") + elif provider == "openshift": + self.connection = KubeOpenshiftClient(config) + logger.debug("Using OpenShift Provider KubeClient library") + else: + raise KubeClientError("No provider by that name.") + + # Create an object using its respective API + def create(self, obj, namespace="default"): + self.connection.create(obj, namespace) + + # Delete an object using its respective API + def delete(self, obj, namespace="default"): + self.connection.delete(obj, namespace) + + # Current support: kubernetes only + def namespaces(self): + return self.connection.namespaces() diff --git a/atomicapp/providers/lib/kubeshift/exceptions.py b/atomicapp/providers/lib/kubeshift/exceptions.py new file mode 100644 index 00000000..eb00886b --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/exceptions.py @@ -0,0 +1,42 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see <http://www.gnu.org/licenses/>. +""" + + +class KubeOpenshiftError(Exception): + pass + + +class KubeKubernetesError(Exception): + pass + + +class KubeConfigError(Exception): + pass + + +class KubeClientError(Exception): + pass + + +class KubeConnectionError(Exception): + pass + + +class KubeBaseError(Exception): + pass diff --git a/atomicapp/providers/lib/kubeshift/kubebase.py b/atomicapp/providers/lib/kubeshift/kubebase.py new file mode 100644 index 00000000..081354ea --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/kubebase.py @@ -0,0 +1,350 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version.
+ + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see <http://www.gnu.org/licenses/>. +""" + +import requests +import websocket +import tempfile +import base64 +import ssl +from requests.exceptions import SSLError +from atomicapp.providers.lib.kubeshift.exceptions import (KubeBaseError, + KubeConnectionError) +from atomicapp.constants import LOGGER_DEFAULT +import logging +logger = logging.getLogger(LOGGER_DEFAULT) + + +class KubeBase(object): + + ''' + The role of KubeBase is to parse the kube config file, expose an + understandable API and initiate the connection to + Kubernetes-based APIs (OpenShift/Kubernetes). + + ''' + cluster = None + user = None + token = None + client_certification = None + client_key = None + certificate_authority_data = None # Not yet implemented + certificate_authority = None + certificate_ca = None # Not yet implemented + insecure_skip_tls_verify = False + + def __init__(self, config): + ''' + Args: + config (object): An object of the .kube/config configuration + ''' + self.kubeconfig = config + + # Gather the "current-context" from .kube/config, which indicates which + # cluster, user, token, etc. are being used. + if "current-context" not in config: + raise KubeBaseError("'current-context' needs to be set within .kube/config") + else: + self.current_context = config["current-context"] + + # Gather the context and cluster details of .kube/config based upon the current_context + kubeconfig_context = self._contexts()[self.current_context] + kubeconfig_cluster = kubeconfig_context['cluster'] + self.cluster = self._clusters()[kubeconfig_cluster] + + # Gather cluster information (certificate authority) + if "certificate-authority" in self.cluster: + self.certificate_authority = self.cluster["certificate-authority"] + + if "insecure-skip-tls-verify" in self.cluster: + self.insecure_skip_tls_verify = self.cluster["insecure-skip-tls-verify"] + + # If a 'user' is present, gather the information in order to retrieve the token(s), + # certificate(s) as well as client-key. A user is OPTIONAL in the .kube/config data + # and hence the if statement.
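For reference, the minimal structure KubeBase expects from the parsed config, mirroring the standard kubeconfig layout (values hypothetical):

```python
config = {
    'current-context': 'dev',
    'contexts': [{'name': 'dev',
                  'context': {'cluster': 'devcluster', 'user': 'devuser'}}],
    'clusters': [{'name': 'devcluster',
                  'cluster': {'server': 'https://127.0.0.1:8443',
                              'certificate-authority': '/etc/origin/ca.crt'}}],
    'users': [{'name': 'devuser', 'user': {'token': '5Gg9kPsC3Kq'}}],
}
```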
+ if "user" in kubeconfig_context: + + kubeconfig_user = kubeconfig_context['user'] + self.user = self._users()[kubeconfig_user] + + if "token" in self.user: + self.token = self.user['token'] + + if "client-certificate" in self.user: + self.client_certification = self.user['client-certificate'] + + if "client-key" in self.user: + self.client_key = self.user['client-key'] + + # Initialize the connection using all the .kube/config credentials + self.api = self._connection() + + def request(self, method, url, data=None): + ''' + Completes the request to the API and fails if the status_code is != 200/201 + + Args: + method (str): put/get/post/patch + url (str): url of the api call + data (object): object of the data that is being passed (will be converted to json) + ''' + status_code = None + return_data = None + + try: + res = self._request_method(method, url, data) + status_code = res.status_code + return_data = res.json() + except requests.exceptions.ConnectTimeout: + msg = "Timeout when connecting to %s" % url + raise KubeConnectionError(msg) + except requests.exceptions.ReadTimeout: + msg = "Timeout when reading from %s" % url + raise KubeConnectionError(msg) + except requests.exceptions.ConnectionError: + msg = "Refused connection to %s" % url + raise KubeConnectionError(msg) + except SSLError: + raise KubeConnectionError("SSL/TLS ERROR: invalid certificate") + except ValueError: + return_data = None + + # 200 = OK + # 201 = PENDING + # EVERYTHING ELSE == FAIL + if status_code is not 200 and status_code is not 201: + raise KubeConnectionError("Unable to complete request: Status: %s, Error: %s" + % (status_code, return_data)) + return return_data + + def websocket_request(self, url, outfile=None): + ''' + Due to the requests library not supporting SPDY, websocket(s) are required + to communicate to the API. + + Args: + url (str): URL of the API + outfile (str): path of the outfile/data. + ''' + url = 'wss://' + url.split('://', 1)[-1] + logger.debug('Converted http to wss url: %s', url) + results = [] + + ws = websocket.WebSocketApp( + url, + on_message=lambda ws, message: self._handle_exec_reply(ws, message, results, outfile)) + + ws.run_forever(sslopt={ + 'ca_certs': self.cert_ca if self.cert_ca is not None else ssl.CERT_NONE, + 'cert_reqs': ssl.CERT_REQUIRED if self.insecure_skip_tls_verify else ssl.CERT_NONE}) + + # If an outfile was not provided, return the results in its entirety + if not outfile: + return ''.join(results) + + def get_groups(self, url): + ''' + Get the groups of APIs available. + ''' + data = self.request("get", url) + groups = data["groups"] or [] + groups = [(group['name'], [i['version'] for i in group['versions']]) for group in groups] + return groups + + def get_resources(self, url): + ''' + Get the resources available to the API. This is a list of all available + API calls that can be made to the API. + ''' + data = self.request("get", url) + resources = data["resources"] or [] + resources = [res['name'] for res in resources] + return resources + + def test_connection(self, url): + self.api.request("get", url) + logger.debug("Connection successfully tested on URL %s" % url) + + @staticmethod + def cert_file(data, key): + ''' + Some certificate .kube/config components are required to be a filename. + + Returns either the filename or a tmp location of said data in a file. + All certificates used with Kubernetes are base64 encoded and thus need to be decoded + + Keys which have "-data" associated with the name are base64 encoded. All others are not. 
+        '''
+
+        # If it starts with /, we assume it's a filename, so we just return it.
+        if data.startswith('/'):
+            return data
+
+        # Otherwise we assume it's certificate data: write it to a tmp file,
+        # decoding it first if the key name marks it as base64 encoded.
+        else:
+            with tempfile.NamedTemporaryFile(delete=False) as f:
+                # If '-data' is included in the key name, it's a base64
+                # encoded string and is required to be decoded
+                if "-data" in key:
+                    f.write(base64.b64decode(data))
+                else:
+                    f.write(data)
+            return f.name
+
+    @staticmethod
+    def kind_to_resource_name(kind):
+        """
+        Converts kind to resource name. It is the same logic
+        as in k8s.io/k8s/pkg/api/meta/restmapper.go (func KindToResource)
+        Example:
+            Pod -> pods
+            Policy -> policies
+            BuildConfig -> buildconfigs
+
+        Args:
+            kind (str): Kind of the object
+
+        Returns:
+            Resource name (str) (kind in plural form)
+        """
+        singular = kind.lower()
+        if singular.endswith(("s", "x", "z", "ch", "sh")):
+            plural = singular + "es"
+        elif singular[-1] == "y":
+            plural = singular.rstrip("y") + "ies"
+        else:
+            plural = singular + "s"
+        return plural
+
+    def _contexts(self):
+        '''
+        Parses the contexts into a name -> context mapping.
+        ex.
+            'foobar': { 'cluster': 'foo', 'user': 'bar' }
+        '''
+        contexts = {}
+        if "contexts" not in self.kubeconfig:
+            raise KubeBaseError("No contexts within the .kube/config file")
+        for f in self.kubeconfig["contexts"]:
+            contexts[f["name"]] = f["context"]
+        return contexts
+
+    def _clusters(self):
+        '''
+        Parses the clusters into a name -> cluster mapping.
+        ex.
+            'foobar': { 'server': 'https://foo:8443' }
+        '''
+        clusters = {}
+        if "clusters" not in self.kubeconfig:
+            raise KubeBaseError("No clusters within the .kube/config file")
+        for f in self.kubeconfig["clusters"]:
+            clusters[f["name"]] = f["cluster"]
+        return clusters
+
+    def _users(self):
+        '''
+        Parses the users into a name -> user mapping.
+        ex.
+            'foobar': { 'token': 'foo' }
+        '''
+        users = {}
+        if "users" not in self.kubeconfig:
+            raise KubeBaseError("No users within the .kube/config file")
+        for f in self.kubeconfig["users"]:
+            users[f["name"]] = f["user"]
+        return users
+
+    def _connection(self):
+        '''
+        Initializes the requests session with the certs / token / authentication
+        required to communicate with the API
+        '''
+        connection = requests.Session()
+
+        # CA Certificate for TLS verification
+        if self.certificate_authority:
+            connection.verify = self.cert_file(
+                self.certificate_authority,
+                "certificate-authority")
+
+        # Check to see if verification has been disabled; if it has,
+        # disable tls-verification
+        if self.insecure_skip_tls_verify:
+            connection.verify = False
+            # Disable the 'InsecureRequestWarning' notifications.
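+            # (urllib3 would otherwise warn on every single unverified request)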
+            # As per: https://github.com/kennethreitz/requests/issues/2214
+            # Instead, make one large noticeable warning below
+            requests.packages.urllib3.disable_warnings()
+            logger.warning("CAUTION: TLS verification has been DISABLED")
+        else:
+            logger.debug("Verification will be required for all API calls")
+
+        # If we're using a token, use it, otherwise it's assumed the user uses
+        # client-certificate and client-key
+        if self.token:
+            connection.headers["Authorization"] = "Bearer %s" % self.token
+
+        # Lastly, if we have client-certificate and client-key in the .kube/config
+        # we add them to the connection as a cert
+        if self.client_certification and self.client_key:
+            connection.cert = (
+                self.cert_file(self.client_certification, "client-certificate"),
+                self.cert_file(self.client_key, "client-key")
+            )
+
+        return connection
+
+    def _handle_ws_reply(self, ws, message, results, outfile=None):
+        """
+        Handle websocket reply messages for each exec call
+        """
+        # FIXME: For some reason, we do not know why, we need to ignore the
+        # 1st char of the message, to generate a meaningful result
+        cleaned_msg = message[1:]
+        if outfile:
+            with open(outfile, 'ab') as f:
+                f.write(cleaned_msg)
+        else:
+            results.append(cleaned_msg)
+
+    def _request_method(self, method, url, data):
+        '''
+        Converts the method to the most appropriate request and calls it.
+
+        Args:
+            method (str): put/get/post/patch/delete
+            url (str): url of the api call
+            data (object): object of the data that is being passed (will be converted to json)
+        '''
+        if method.lower() == "get":
+            res = self.api.get(url, json=data)
+        elif method.lower() == "post":
+            res = self.api.post(url, json=data)
+        elif method.lower() == "put":
+            res = self.api.put(url, json=data)
+        elif method.lower() == "delete":
+            res = self.api.delete(url, json=data)
+        elif method.lower() == "patch":
+            headers = {"Content-Type": "application/json-patch+json"}
+            res = self.api.patch(url, json=data, headers=headers)
+        else:
+            raise KubeBaseError("Unsupported request method: %s" % method)
+        return res
diff --git a/atomicapp/providers/lib/kubeshift/kubeconfig.py b/atomicapp/providers/lib/kubeshift/kubeconfig.py
new file mode 100644
index 00000000..8a4b1beb
--- /dev/null
+++ b/atomicapp/providers/lib/kubeshift/kubeconfig.py
@@ -0,0 +1,175 @@
+import anymarkup
+
+from atomicapp.plugin import ProviderFailedException
+from atomicapp.constants import (PROVIDER_AUTH_KEY,
+                                 LOGGER_DEFAULT,
+                                 NAMESPACE_KEY,
+                                 PROVIDER_API_KEY,
+                                 PROVIDER_TLS_VERIFY_KEY,
+                                 PROVIDER_CA_KEY)
+import logging
+logger = logging.getLogger(LOGGER_DEFAULT)
+
+
+class KubeConfig(object):
+
+    @staticmethod
+    def from_file(filename):
+        '''
+        Load a file using anymarkup
+
+        Params:
+            filename (str): File location
+        '''
+
+        return anymarkup.parse_file(filename)
+
+    @staticmethod
+    def from_params(api=None, auth=None, ca=None, verify=True):
+        '''
+        Creates a .kube/config configuration as an
+        object based upon the arguments given.
+
+        Params:
+            api(str): API URL of the server
+            auth(str): Authentication key for the server
+            ca(str): The certificate being used.
This can be either a file location or a base64 encoded string + verify(bool): true/false of whether or not certificate verification is enabled + + Returns: + config(obj): An object file of generate .kube/config + + ''' + config = { + "clusters": [ + { + "name": "self", + "cluster": { + }, + }, + ], + "users": [ + { + "name": "self", + "user": { + "token": "" + }, + }, + ], + "contexts": [ + { + "name": "self", + "context": { + "cluster": "self", + "user": "self", + }, + } + ], + "current-context": "self", + } + if api: + config['clusters'][0]['cluster']['server'] = api + + if auth: + config['users'][0]['user']['token'] = auth + + if ca: + config['clusters'][0]['cluster']['certificate-authority'] = ca + + if verify is False: + config['clusters'][0]['cluster']['insecure-skip-tls-verify'] = 'true' + return config + + @staticmethod + def parse_kubeconf(filename): + """" + Parse kubectl config file + + Args: + filename (string): path to configuration file (e.g. ./kube/config) + + Returns: + dict of parsed values from config + + Example of expected file format: + apiVersion: v1 + clusters: + - cluster: + server: https://10.1.2.2:8443 + certificate-authority: path-to-ca.cert + insecure-skip-tls-verify: false + name: 10-1-2-2:8443 + contexts: + - context: + cluster: 10-1-2-2:8443 + namespace: test + user: test-admin/10-1-2-2:8443 + name: test/10-1-2-2:8443/test-admin + current-context: test/10-1-2-2:8443/test-admin + kind: Config + preferences: {} + users: + - name: test-admin/10-1-2-2:8443 + user: + token: abcdefghijklmnopqrstuvwxyz0123456789ABCDEF + """ + logger.debug("Parsing %s", filename) + + with open(filename, 'r') as fp: + kubecfg = anymarkup.parse(fp.read()) + + try: + return KubeConfig.parse_kubeconf_data(kubecfg) + except ProviderFailedException: + raise ProviderFailedException('Invalid %s' % filename) + + @staticmethod + def parse_kubeconf_data(kubecfg): + """ + Parse kubeconf data. + + Args: + kubecfg (dict): Kubernetes config data + + Returns: + dict of parsed values from config + """ + url = None + token = None + namespace = None + tls_verify = True + ca = None + + current_context = kubecfg["current-context"] + + logger.debug("current context: %s", current_context) + + try: + context = filter(lambda co: co["name"] == current_context, + kubecfg["contexts"])[0] + logger.debug("context: %s", context) + + cluster = filter(lambda cl: cl["name"] == context["context"]["cluster"], + kubecfg["clusters"])[0] + logger.debug("cluster: %s", cluster) + + user = filter(lambda usr: usr["name"] == context["context"]["user"], + kubecfg["users"])[0] + logger.debug("user: %s", user) + except IndexError: + raise ProviderFailedException() + + url = cluster["cluster"]["server"] + token = user["user"]["token"] + if "namespace" in context["context"]: + namespace = context["context"]["namespace"] + if "insecure-skip-tls-verify" in cluster["cluster"]: + tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"] + elif "certificate-authority" in cluster["cluster"]: + ca = cluster["cluster"]["certificate-authority"] + + return {PROVIDER_API_KEY: url, + PROVIDER_AUTH_KEY: token, + NAMESPACE_KEY: namespace, + PROVIDER_TLS_VERIFY_KEY: tls_verify, + PROVIDER_CA_KEY: ca} diff --git a/atomicapp/providers/lib/kubeshift/kubernetes.py b/atomicapp/providers/lib/kubeshift/kubernetes.py new file mode 100644 index 00000000..5bbbc7a2 --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/kubernetes.py @@ -0,0 +1,197 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +import logging +import re + +from urlparse import urljoin +from urllib import urlencode +from atomicapp.constants import LOGGER_DEFAULT +from atomicapp.providers.lib.kubeshift.kubebase import KubeBase +from atomicapp.providers.lib.kubeshift.exceptions import (KubeKubernetesError) + +logger = logging.getLogger(LOGGER_DEFAULT) + + +class KubeKubernetesClient(object): + + def __init__(self, config): + ''' + + Args: + config (obj): Object of the configuration data + + ''' + + # The configuration data passed in will be .kube/config data, so process is accordingly. + self.api = KubeBase(config) + + # Check the API url + url = self.api.cluster['server'] + if not re.match('(?:http|https)://', url): + raise KubeKubernetesError("Kubernetes API URL does not include HTTP or HTTPS") + + # Gather what end-points we will be using + self.k8s_api = urljoin(url, "api/v1/") + + # Test the connection before proceeding + self.api.test_connection(self.k8s_api) + + # Gather the resource names which will be used for the 'kind' API calls + self.k8s_api_resources = {} + self.k8s_api_resources['v1'] = self.api.get_resources(self.k8s_api) + + # Gather what API groups are available + self.k8s_apis = urljoin(url, "apis/") + + # Gather the group names from which resource names will be derived + self.k8s_api_groups = self.api.get_groups(self.k8s_apis) + + for (name, versions) in self.k8s_api_groups: + for version in versions: + api = "%s/%s" % (name, version) + url = urljoin(self.k8s_apis, api) + self.k8s_api_resources[api] = self.api.get_resources(url) + + def create(self, obj, namespace): + ''' + Create an object from the Kubernetes cluster + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace) + + self.api.request("post", url, data=obj) + + logger.info("%s '%s' successfully created", kind.capitalize(), name) + + def delete(self, obj, namespace): + ''' + Delete an object from the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + + *Note* + Replication controllers must scale to 0 in order to delete pods. + Kubernetes 1.3 will implement server-side cascading deletion, but + until then, it's mandatory to scale to 0 + https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/garbage-collection.md + + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace, name) + + if kind in ['rcs', 'replicationcontrollers']: + self.scale(obj, namespace) + self.api.request("delete", url) + + logger.info("%s '%s' successfully deleted", kind.capitalize(), name) + + def scale(self, obj, namespace, replicas=0): + ''' + By default we scale back down to 0. 
This function takes an object and scales said + object down to a specified value on the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + ''' + patch = [{"op": "replace", + "path": "/spec/replicas", + "value": replicas}] + name = self._get_metadata_name(obj) + _, url = self._generate_kurl(obj, namespace, name) + self.api.request("patch", url, data=patch) + logger.info("'%s' successfully scaled to %s", name, replicas) + + def namespaces(self): + ''' + Gathers a list of namespaces on the Kubernetes cluster + ''' + url = urljoin(self.k8s_api, "namespaces") + ns = self.api.request("get", url) + return ns['items'] + + def _generate_kurl(self, obj, namespace, name=None, params=None): + ''' + Generate the required URL by extracting the 'kind' from the + object as well as the namespace. + + Args: + obj (obj): Object of the data being passed + namespace (str): k8s namespace + name (str): Name of the object being passed + params (arr): Extra params passed such as timeout=300 + + Returns: + kind (str): The kind used + url (str): The URL to be used / artifact URL + ''' + if 'apiVersion' not in obj.keys(): + raise KubeKubernetesError("Error processing object. There is no apiVersion") + + if 'kind' not in obj.keys(): + raise KubeKubernetesError("Error processing object. There is no kind") + + api_version = obj['apiVersion'] + + kind = obj['kind'] + + resource = KubeBase.kind_to_resource_name(kind) + + if resource in self.k8s_api_resources[api_version]: + if api_version == 'v1': + url = self.k8s_api + else: + url = urljoin(self.k8s_apis, "%s/" % api_version) + else: + raise KubeKubernetesError("No kind by that name: %s" % kind) + + url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource)) + + if name: + url = urljoin(url, name) + + if params: + url = urljoin(url, "?%s" % urlencode(params)) + + return (resource, url) + + @staticmethod + def _get_metadata_name(obj): + ''' + This looks at the object and grabs the metadata name of said object + + Args: + obj (object): Object file of the artifact + + Returns: + name (str): Returns the metadata name of the object + ''' + if "metadata" in obj and \ + "name" in obj["metadata"]: + name = obj["metadata"]["name"] + else: + raise KubeKubernetesError("Cannot undeploy. There is no" + " name in object metadata " + "object=%s" % obj) + return name diff --git a/atomicapp/providers/lib/kubeshift/openshift.py b/atomicapp/providers/lib/kubeshift/openshift.py new file mode 100644 index 00000000..a5a46515 --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/openshift.py @@ -0,0 +1,388 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . 
+""" + +import datetime +import time +import os +import tarfile +import logging +import re + +from urlparse import urljoin +from urllib import urlencode +from atomicapp.utils import Utils +from atomicapp.constants import LOGGER_DEFAULT +from atomicapp.providers.lib.kubeshift.kubebase import KubeBase +from atomicapp.providers.lib.kubeshift.exceptions import KubeOpenshiftError + +logger = logging.getLogger(LOGGER_DEFAULT) + + +class KubeOpenshiftClient(object): + + def __init__(self, config): + ''' + + Args: + config (obj): Object of the configuration data + + ''' + + # The configuration data passed in will be .kube/config data, so process is accordingly. + self.api = KubeBase(config) + + # Check the API url + url = self.api.cluster['server'] + if not re.match('(?:http|https)://', url): + raise KubeOpenshiftError("OpenShift API URL does not include HTTP or HTTPS") + + # Gather what end-points we will be using + self.k8s_api = urljoin(url, "api/v1/") + self.oc_api = urljoin(url, "oapi/v1/") + + # Test the connection before proceeding + self.api.test_connection(self.k8s_api) + self.api.test_connection(self.oc_api) + + # Gather the resource names which will be used for the 'kind' API calls + self.oc_api_resources = self.api.get_resources(self.oc_api) + + # Gather what API groups are available + # TODO: refactor this (create function in kubebase.py) + self.k8s_api_resources = {} + self.k8s_api_resources['v1'] = self.api.get_resources(self.k8s_api) + self.k8s_apis = urljoin(url, "apis/") + + # Gather the group names from which resource names will be derived + self.k8s_api_groups = self.api.get_groups(self.k8s_apis) + + for (name, versions) in self.k8s_api_groups: + for version in versions: + api = "%s/%s" % (name, version) + url = urljoin(self.k8s_apis, api) + self.k8s_api_resources[api] = self.api.get_resources(url) + + def create(self, obj, namespace): + ''' + Create an object from the Kubernetes cluster + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace) + + # Must process through each object if kind is a 'template' + if kind is "template": + self._process_template(obj, namespace, "create") + else: + self.api.request("post", url, data=obj) + + logger.info("%s '%s' successfully created", kind.capitalize(), name) + + def delete(self, obj, namespace): + ''' + Delete an object from the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + + *Note* + Replication controllers must scale to 0 in order to delete pods. + Kubernetes 1.3 will implement server-side cascading deletion, but + until then, it's mandatory to scale to 0 + https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/garbage-collection.md + + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace, name) + + # Must process through each object if kind is a 'template' + if kind is "template": + self._process_template(obj, namespace, "create") + else: + if kind in ['rcs', 'replicationcontrollers']: + self.scale(obj, namespace) + self.api.request("delete", url) + + logger.info("%s '%s' successfully deleted", kind.capitalize(), name) + + def scale(self, obj, namespace, replicas=0): + ''' + By default we scale back down to 0. 
This function takes an object and scales said + object down to a specified value on the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + ''' + patch = [{"op": "replace", + "path": "/spec/replicas", + "value": replicas}] + name = self._get_metadata_name(obj) + _, url = self._generate_kurl(obj, namespace, name) + self.api.request("patch", url, data=patch) + logger.info("'%s' successfully scaled to %s", name, replicas) + + def namespaces(self): + ''' + Gathers a list of namespaces on the Kubernetes cluster + ''' + url = urljoin(self.oc_api, "projects") + ns = self.api.request("get", url) + return ns['items'] + + def _generate_kurl(self, obj, namespace, name=None, params=None): + ''' + Generate the required URL by extracting the 'kind' from the + object as well as the namespace. + + Args: + obj (obj): Object of the data being passed + namespace (str): k8s namespace + name (str): Name of the object being passed + params (arr): Extra params passed such as timeout=300 + + Returns: + kind (str): The kind used + url (str): The URL to be used / artifact URL + ''' + if 'apiVersion' not in obj.keys(): + raise KubeOpenshiftError("Error processing object. There is no apiVersion") + + if 'kind' not in obj.keys(): + raise KubeOpenshiftError("Error processing object. There is no kind") + + api_version = obj['apiVersion'] + + kind = obj['kind'] + + resource = KubeBase.kind_to_resource_name(kind) + + if resource in self.k8s_api_resources[api_version]: + if api_version == 'v1': + url = self.k8s_api + else: + url = urljoin(self.k8s_apis, "%s/" % api_version) + elif resource in self.oc_api_resources: + url = self.oc_api + else: + raise KubeOpenshiftError("No kind by that name: %s" % kind) + + url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource)) + + if name: + url = urljoin(url, name) + + if params: + url = urljoin(url, "?%s" % urlencode(params)) + + return (resource, url) + + @staticmethod + def _get_metadata_name(obj): + ''' + This looks at the object and grabs the metadata name of said object + + Args: + obj (object): Object file of the artifact + + Returns: + name (str): Returns the metadata name of the object + ''' + if "metadata" in obj and \ + "name" in obj["metadata"]: + name = obj["metadata"]["name"] + else: + raise KubeOpenshiftError("Cannot undeploy. There is no" + " name in object metadata " + "object=%s" % obj) + return name + + # OPENSHIFT-SPECIFIC FUNCTIONS + + def extract(self, image, src, dest, namespace, update=True): + """ + Extract contents of a container image from 'src' in container + to 'dest' in host. 
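+
+        Sketch of the approach (see the body below): a throwaway pod running
+        'sleep 3600' is brought up from the image, 'src' is tar'ed up inside
+        it via a websocket exec call, and the archive is unpacked at 'dest'.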
+ + Args: + image (str): Name of container image + src (str): Source path in container + dest (str): Destination path in host + update (bool): Update existing destination, if True + """ + if os.path.exists(dest) and not update: + return + cleaned_image_name = Utils.sanitizeName(image) + pod_name = '{}-{}'.format(cleaned_image_name, Utils.getUniqueUUID()) + container_name = cleaned_image_name + + # Pull (if needed) image and bring up a container from it + # with 'sleep 3600' entrypoint, just to extract content from it + artifact = { + 'apiVersion': 'v1', + 'kind': 'Pod', + 'metadata': { + 'name': pod_name + }, + 'spec': { + 'containers': [ + { + 'image': image, + 'command': [ + 'sleep', + '3600' + ], + 'imagePullPolicy': 'IfNotPresent', + 'name': container_name + } + ], + 'restartPolicy': 'Always' + } + } + + self.create(artifact, namespace) + try: + self._wait_till_pod_runs(namespace, pod_name, timeout=300) + + # Archive content from the container and dump it to tmpfile + tmpfile = '/tmp/atomicapp-{pod}.tar.gz'.format(pod=pod_name) + + self._execute( + namespace, pod_name, container_name, + 'tar -cz --directory {} ./'.format('/' + src), + outfile=tmpfile + ) + finally: + # Delete created pod + self.delete(artifact, namespace) + + # Extract archive data + tar = tarfile.open(tmpfile, 'r:gz') + tar.extractall(dest) + + def _execute(self, namespace, pod, container, command, + outfile=None): + """ + Execute a command in a container in an Openshift pod. + + Args: + namespace (str): Namespace + pod (str): Pod name + container (str): Container name inside pod + command (str): Command to execute + outfile (str): Path to output file where results should be dumped + + Returns: + Command output (str) or None in case results dumped to output file + """ + args = { + 'token': self.api.token, + 'namespace': namespace, + 'pod': pod, + 'container': container, + 'command': ''.join(['command={}&'.format(word) for word in command.split()]) + } + url = urljoin( + self.k8s_api, + 'namespaces/{namespace}/pods/{pod}/exec?' + 'access_token={token}&container={container}&' + '{command}stdout=1&stdin=0&tty=0'.format(**args)) + + return self.api.websocket_request(url, outfile) + + def _process_template(self, obj, namespace, method): + _, url = self._generate_kurl(obj, namespace) + data = self.api.request("post", url, data=obj) + + if method is "create": + for o in data[0]['objects']: + name = self._get_metadata_name(o) + _, object_url = self._generate_kurl(o, namespace) + self.api.request("post", object_url, data=o) + logger.debug("Created template object: %s" % name) + elif method is "delete": + for o in data[0]['objects']: + name = self._get_metadata_name(o) + _, object_url = self._generate_kurl(o, namespace, name) + self.api.request("delete", object_url) + logger.debug("Deleted template object: %s" % name) + else: + raise KubeOpenshiftError("No method by that name to process template") + + logger.debug("Processed object template successfully") + + def _get_pod_status(self, namespace, pod): + """ + Get pod status. + + Args: + namespace (str): Openshift namespace + pod (str): Pod name + + Returns: + Status of pod (str) + + Raises: + ProviderFailedException when unable to fetch Pod status. + """ + args = { + 'namespace': namespace, + 'pod': pod, + 'access_token': self.api.token + } + url = urljoin( + self.k8s_api, + 'namespaces/{namespace}/pods/{pod}?' 
+ 'access_token={access_token}'.format(**args)) + data = self.api.request("get", url) + + return data['status']['phase'].lower() + + def _wait_till_pod_runs(self, namespace, pod, timeout=300): + """ + Wait till pod runs, with a timeout. + + Args: + namespace (str): Openshift namespace + pod (str): Pod name + timeout (int): Timeout in seconds. + + Raises: + ProviderFailedException on timeout or when the pod goes to + failed state. + """ + now = datetime.datetime.now() + timeout_delta = datetime.timedelta(seconds=timeout) + while datetime.datetime.now() - now < timeout_delta: + status = self.oc.get_pod_status(namespace, pod) + if status == 'running': + break + elif status == 'failed': + raise KubeOpenshiftError( + 'Unable to run pod for extracting content: ' + '{namespace}/{pod}'.format(namespace=namespace, + pod=pod)) + time.sleep(1) + if status != 'running': + raise KubeOpenshiftError( + 'Timed out to extract content from pod: ' + '{namespace}/{pod}'.format(namespace=namespace, + pod=pod)) diff --git a/atomicapp/providers/marathon.py b/atomicapp/providers/marathon.py index 6a978ecf..ec3b43f0 100644 --- a/atomicapp/providers/marathon.py +++ b/atomicapp/providers/marathon.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -21,15 +21,17 @@ import urlparse import logging import os +from atomicapp.constants import (LOGGER_COCKPIT, + LOGGER_DEFAULT) from atomicapp.plugin import Provider, ProviderFailedException -from atomicapp.utils import printErrorStatus from atomicapp.utils import Utils from atomicapp.constants import PROVIDER_API_KEY -logger = logging.getLogger(__name__) +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) +logger = logging.getLogger(LOGGER_DEFAULT) -class Marathon(Provider): +class MarathonProvider(Provider): key = "marathon" config_file = None @@ -49,7 +51,7 @@ def init(self): logger.debug("marathon_api = %s", self.marathon_api) self._process_artifacts() - def deploy(self): + def run(self): """ Deploys the app by given resource manifests. """ for artifact in self.marathon_artifacts: @@ -72,7 +74,7 @@ def deploy(self): logger.error(msg) raise ProviderFailedException(msg) - def undeploy(self): + def stop(self): """ Undeploys the app by given resource manifests. Undeploy operation deletes Marathon apps from cluster. """ @@ -108,15 +110,17 @@ def _process_artifacts(self): data = None with open(os.path.join(self.path, artifact), "r") as fp: try: - data = anymarkup.parse(fp) + # env variables in marathon artifacts have to be string:string + # force_types=None respects types from json file + data = anymarkup.parse(fp, force_types=None) logger.debug("Parsed artifact %s", data) # every marathon app has to have id. 'id' key is also used for showing messages if "id" not in data.keys(): msg = "Error processing %s artifact. There is no id" % artifact - printErrorStatus(msg) + cockpit_logger.error(msg) raise ProviderFailedException(msg) except anymarkup.AnyMarkupError, e: msg = "Error processing artifact - %s" % e - printErrorStatus(msg) + cockpit_logger.error(msg) raise ProviderFailedException(msg) self.marathon_artifacts.append(data) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 2a35b152..f1fe8415 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -17,446 +17,186 @@ along with Atomic App. If not, see . 
""" -import datetime -import os import anymarkup -import ssl -import tarfile -import time -from urlparse import urljoin -from urllib import urlencode -import websocket +import logging +import os -from atomicapp.utils import Utils -from atomicapp.plugin import Provider, ProviderFailedException -from atomicapp.constants import (ACCESS_TOKEN_KEY, +from atomicapp.constants import (PROVIDER_AUTH_KEY, ANSWERS_FILE, DEFAULT_NAMESPACE, - NAMESPACE_KEY, + LOGGER_DEFAULT, PROVIDER_API_KEY, + PROVIDER_CA_KEY, PROVIDER_TLS_VERIFY_KEY, - PROVIDER_CA_KEY) -from requests.exceptions import SSLError -import logging -logger = logging.getLogger(__name__) - - -class OpenshiftClient(object): - - def __init__(self, providerapi, access_token, - provider_tls_verify, provider_ca): - self.providerapi = providerapi - self.access_token = access_token - self.provider_tls_verify = provider_tls_verify - self.provider_ca = provider_ca - - # construct full urls for api endpoints - self.kubernetes_api = urljoin(self.providerapi, "api/v1/") - self.openshift_api = urljoin(self.providerapi, "oapi/v1/") - - logger.debug("kubernetes_api = %s", self.kubernetes_api) - logger.debug("openshift_api = %s", self.openshift_api) - - def test_connection(self): - """ - Test connection to OpenShift server - - Raises: - ProviderFailedException - Invalid SSL/TLS certificate - """ - logger.debug("Testing connection to OpenShift server") - - if self.provider_ca and not os.path.exists(self.provider_ca): - raise ProviderFailedException("Unable to find CA path %s" - % self.provider_ca) - - try: - (status_code, return_data) = \ - Utils.make_rest_request("get", - self.openshift_api, - verify=self._requests_tls_verify()) - except SSLError as e: - if self.provider_tls_verify: - msg = "SSL/TLS ERROR: invalid certificate. 
" \ - "Add certificate of correct Certificate Authority providing" \ - " `%s` or you can disable SSL/TLS verification by `%s=False`" \ - % (PROVIDER_CA_KEY, PROVIDER_TLS_VERIFY_KEY) - raise ProviderFailedException(msg) - else: - # this shouldn't happen - raise ProviderFailedException(e.message) - - def get_oapi_resources(self): - """ - Get Openshift API resources - """ - # get list of supported resources for each api - (status_code, return_data) = \ - Utils.make_rest_request("get", - self.openshift_api, - verify=self._requests_tls_verify()) - if status_code == 200: - oapi_resources = return_data["resources"] - else: - raise ProviderFailedException("Cannot get OpenShift resource list") - - # convert resources list of dicts to list of names - oapi_resources = [res['name'] for res in oapi_resources] - - logger.debug("Openshift resources %s", oapi_resources) - - return oapi_resources - - def get_kapi_resources(self): - """ - Get kubernetes API resources - """ - # get list of supported resources for each api - (status_code, return_data) = \ - Utils.make_rest_request("get", - self.kubernetes_api, - verify=self._requests_tls_verify()) - if status_code == 200: - kapi_resources = return_data["resources"] - else: - raise ProviderFailedException("Cannot get Kubernetes resource list") - - # convert resources list of dicts to list of names - kapi_resources = [res['name'] for res in kapi_resources] - - logger.debug("Kubernetes resources %s", kapi_resources) - - return kapi_resources - - def deploy(self, url, artifact): - (status_code, return_data) = \ - Utils.make_rest_request("post", - url, - verify=self._requests_tls_verify(), - data=artifact) - if status_code == 201: - logger.info("Object %s sucessfully deployed.", - artifact['metadata']['name']) - else: - msg = "%s %s" % (status_code, return_data) - logger.error(msg) - # TODO: remove running components (issue: #428) - raise ProviderFailedException(msg) - - def delete(self, url): - """ - Delete object on given url - - Args: - url (str): full url for artifact - - Raises: - ProviderFailedException: error when calling remote api - """ - (status_code, return_data) = \ - Utils.make_rest_request("delete", - url, - verify=self._requests_tls_verify()) - if status_code == 200: - logger.info("Sucessfully deleted.") - else: - msg = "%s %s" % (status_code, return_data) - logger.error(msg) - raise ProviderFailedException(msg) - - def process_template(self, url, template): - (status_code, return_data) = \ - Utils.make_rest_request("post", - url, - verify=self._requests_tls_verify(), - data=template) - if status_code == 201: - logger.info("template proccessed %s", template['metadata']['name']) - logger.debug("processed template %s", return_data) - return return_data['objects'] - else: - msg = "%s %s" % (status_code, return_data) - logger.error(msg) - raise ProviderFailedException(msg) - - def _requests_tls_verify(self): - """ - Return verify parameter for function Utils.make_rest_request - in format that is used by requests library. - see: http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification - """ - if self.provider_ca: - return self.provider_ca - else: - return self.provider_tls_verify - - def execute(self, namespace, pod, container, command, - outfile=None): - """ - Execute a command in a container in an Openshift pod. 
- - Args: - namespace (str): Namespace - pod (str): Pod name - container (str): Container name inside pod - command (str): Command to execute - outfile (str): Path to output file where results should be dumped - - Returns: - Command output (str) or None in case results dumped to output file - """ - args = { - 'token': self.access_token, - 'namespace': namespace, - 'pod': pod, - 'container': container, - 'command': ''.join(['command={}&'.format(word) for word in command.split()]) - } - url = urljoin( - self.kubernetes_api, - 'namespaces/{namespace}/pods/{pod}/exec?' - 'access_token={token}&container={container}&' - '{command}stdout=1&stdin=0&tty=0'.format(**args)) - - # The above endpoint needs the request to be upgraded to SPDY, - # which python-requests does not yet support. However, the same - # endpoint works over websockets, so we are using websocket client. - - # Convert url from http(s) protocol to wss protocol - url = 'wss://' + url.split('://', 1)[-1] - logger.debug('url: {}'.format(url)) - - results = [] - - ws = websocket.WebSocketApp( - url, - on_message=lambda ws, message: self._handle_exec_reply(ws, message, results, outfile)) - - ws.run_forever(sslopt={ - 'ca_certs': self.provider_ca, - 'cert_reqs': ssl.CERT_REQUIRED if self.provider_tls_verify else ssl.CERT_NONE}) - - if not outfile: - return ''.join(results) - - def _handle_exec_reply(self, ws, message, results, outfile=None): - """ - Handle reply message for exec call - """ - # FIXME: For some reason, we do not know why, we need to ignore the - # 1st char of the message, to generate a meaningful result - cleaned_msg = message[1:] - if outfile: - with open(outfile, 'ab') as f: - f.write(cleaned_msg) - else: - results.append(cleaned_msg) - - def get_pod_status(self, namespace, pod): - """ - Get pod status. + LOGGER_COCKPIT, + OC_DEFAULT_API) +from atomicapp.plugin import Provider, ProviderFailedException - Args: - namespace (str): Openshift namespace - pod (str): Pod name +from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig +from atomicapp.providers.lib.kubeshift.client import Client +from atomicapp.utils import Utils +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) +logger = logging.getLogger(LOGGER_DEFAULT) - Returns: - Status of pod (str) - Raises: - ProviderFailedException when unable to fetch Pod status. - """ - args = { - 'namespace': namespace, - 'pod': pod, - 'access_token': self.access_token - } - url = urljoin( - self.kubernetes_api, - 'namespaces/{namespace}/pods/{pod}?' - 'access_token={access_token}'.format(**args)) - (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self._requests_tls_verify()) - - if status_code != 200: - raise ProviderFailedException( - 'Could not fetch status for pod: {namespace}/{pod}'.format( - namespace=namespace, pod=pod)) - return return_data['status']['phase'].lower() +class OpenshiftProvider(Provider): + """Operations for OpenShift provider is implemented in this class. + This class implements deploy, stop and undeploy of an atomicapp on + OpenShift provider. 
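+
+    Rough lifecycle, as driven by the surrounding Atomic App machinery
+    (only the method names from this class are real; the wiring is elided):
+
+        provider.init()   # build self.api from --provider-config,
+                          # CLI/answers params, or the default ~/.kube/config
+        provider.run()    # POST each parsed artifact to the cluster
+        provider.stop()   # DELETE each artifact, scaling RCs to 0 first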
+ """ -class OpenShiftProvider(Provider): + # Class variables key = "openshift" - cli_str = "oc" - cli = None - config_file = None - template_data = None - providerapi = "https://127.0.0.1:8443" - openshift_api = None - kubernetes_api = None - access_token = None namespace = DEFAULT_NAMESPACE + oc_artifacts = {} - # verify tls/ssl connection - provider_tls_verify = True - # path to file or dir with CA certificates - provider_ca = None + # From the provider configuration + config_file = None - # Parsed artifacts. Key is kind of artifacts. Value is list of artifacts. - openshift_artifacts = {} + # Essential provider parameters + provider_api = None + provider_auth = None + provider_tls_verify = None + provider_ca = None def init(self): - self.openshift_artifacts = {} + self.oc_artifacts = {} - self._set_config_values() + logger.debug("Given config: %s", self.config) + if self.config.get("namespace"): + self.namespace = self.config.get("namespace") - self.oc = OpenshiftClient(self.providerapi, - self.access_token, - self.provider_tls_verify, - self.provider_ca) - self.openshift_api = self.oc.openshift_api - self.kubernetes_api = self.oc.kubernetes_api + logger.info("Using namespace %s", self.namespace) - # test connection to openshift server - self.oc.test_connection() + self._process_artifacts() - self.oapi_resources = self.oc.get_oapi_resources() - self.kapi_resources = self.oc.get_kapi_resources() + if self.dryrun: + return - self._process_artifacts() + ''' + Config_file: + If a config_file has been provided, use the configuration + from the file and load the associated generated file. + If a config_file exists (--provider-config) use that. - def _get_namespace(self, artifact): - """ - Return namespace for artifact. If namespace is specified inside - artifact use that, if not return default namespace (as specfied in - answers.conf) + Params: + If any provider specific parameters have been provided, + load the configuration through the answers.conf file - Args: - artifact (dict): OpenShift/Kubernetes object + .kube/config: + If no config file or params are provided by user then try to find and + use a config file at the default location. - Returns: - namespace (str) - """ - if "metadata" in artifact and "namespace" in artifact["metadata"]: - return artifact["metadata"]["namespace"] - return self.namespace - - def deploy(self): - logger.debug("Deploying to OpenShift") - # TODO: remove running components if one component fails issue:#428 - for kind, objects in self.openshift_artifacts.iteritems(): - for artifact in objects: - namespace = self._get_namespace(artifact) - url = self._get_url(namespace, kind) + no config at all: + If no .kube/config file can be found then try to connect to the default + unauthenticated http://localhost:8080/api end-point. + ''' - if self.dryrun: - logger.info("DRY-RUN: %s", url) - continue - self.oc.deploy(url, artifact) + default_config_loc = os.path.join( + Utils.getRoot(), Utils.getUserHome().strip('/'), '.kube/config') - def undeploy(self): - """ - Undeploy application. - - Cascade the deletion of the resources managed other resource - (e.g. ReplicationControllers created by a DeploymentConfig and - Pods created by a ReplicationController). - When using command line client this is done automatically - by `oc` command. - When using API calls we have to cascade deletion manually. 
- """ - logger.debug("Starting undeploy") - delete_artifacts = [] - for kind, objects in self.openshift_artifacts.iteritems(): - delete_artifacts.extend(objects) - - for artifact in delete_artifacts: - kind = artifact["kind"].lower() - namespace = self._get_namespace(artifact) - - # get name from metadata so we know which object to delete - if "metadata" in artifact and \ - "name" in artifact["metadata"]: - name = artifact["metadata"]["name"] - else: - raise ProviderFailedException("Cannot undeploy. There is no" - " name in artifacts metadata " - "artifact=%s" % artifact) - - logger.info("Undeploying artifact name=%s kind=%s" % (name, kind)) - - # If this is a DeploymentConfig we need to delete all - # ReplicationControllers that were created by this DC. Find the RC - # that belong to this DC by querying for all RC and filtering based - # on automatically created label openshift.io/deployment-config.name - if kind.lower() == "deploymentconfig": - params = {"labelSelector": - "openshift.io/deployment-config.name=%s" % name} - url = self._get_url(namespace, - "replicationcontroller", - params=params) - (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self.oc._requests_tls_verify()) - if status_code != 200: - raise ProviderFailedException("Cannot get Replication" - "Controllers for Deployment" - "Config %s (status code %s)" % - (name, status_code)) - # kind of returned data is ReplicationControllerList - # https://docs.openshift.com/enterprise/3.1/rest_api/kubernetes_v1.html#v1-replicationcontrollerlist - # we need modify items to get valid ReplicationController - items = return_data["items"] - for item in items: - item["kind"] = "ReplicationController" - item["apiVersion"] = return_data["apiVersion"] - # add items to list of artifact to be deleted - delete_artifacts.extend(items) - - # If this is a ReplicationController we need to delete all - # Pods that were created by this RC. Find the pods that - # belong to this RC by querying for all pods and filtering - # based on the selector used in the RC. 
- if kind.lower() == "replicationcontroller": - selector = ",".join(["%s=%s" % (k, v) for k, v in artifact["spec"]["selector"].iteritems()]) - logger.debug("Using labelSelector: %s" % selector) - params = {"labelSelector": selector} - url = self._get_url(namespace, "pod", params=params) - (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self.oc._requests_tls_verify()) - if status_code != 200: - raise ProviderFailedException("Cannot get Pods for " - "ReplicationController %s" - " (status code %s)" % - (name, status_code)) - # kind of returned data is ReplicationControllerList - # https://docs.openshift.com/enterprise/3.1/rest_api/kubernetes_v1.html#v1-podlist - # we need to modify items to get valid Pod - items = return_data["items"] - for item in items: - item["kind"] = "Pod" - item["apiVersion"] = return_data["apiVersion"] - # add items to list of artifact to be deleted - delete_artifacts.extend(items) - - url = self._get_url(namespace, kind, name) - - if self.dryrun: - logger.info("DRY-RUN: DELETE %s", url) - else: - self.oc.delete(url) + if self.config_file: + logger.debug("Provider configuration provided") + self.api = Client(KubeConfig.from_file(self.config_file), "openshift") + elif self._check_required_params(): + logger.debug("Generating .kube/config from given parameters") + self.api = Client(self._from_required_params(), "openshift") + elif os.path.isfile(default_config_loc): + logger.debug(".kube/config exists, using default configuration file") + self.api = Client(KubeConfig.from_file(default_config_loc), "openshift") + else: + self.config["provider-api"] = OC_DEFAULT_API + self.api = Client(self._from_required_params(), "openshift") + + self._check_namespaces() + + def _build_param_dict(self): + # Initialize the values + paramdict = {PROVIDER_API_KEY: self.provider_api, + PROVIDER_AUTH_KEY: self.provider_auth, + PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify, + PROVIDER_CA_KEY: self.provider_ca} + + # Get values from the loaded answers.conf / passed CLI params + for k in paramdict.keys(): + paramdict[k] = self.config.get(k) + + return paramdict + + def _check_required_params(self, exception=False): + ''' + This checks to see if required parameters associated to the Kubernetes + provider are passed. + PROVIDER_API_KEY and PROVIDER_AUTH_KEY are *required*. Token may be blank. + ''' + + paramdict = self._build_param_dict() + logger.debug("List of parameters passed: %s" % paramdict) + + # Check that the required parameters are passed. If not, error out. + for k in [PROVIDER_API_KEY, PROVIDER_AUTH_KEY]: + if paramdict[k] is None: + if exception: + msg = "You need to set %s in %s or pass it as a CLI param" % (k, ANSWERS_FILE) + raise ProviderFailedException(msg) + else: + return False + + return True + + def _from_required_params(self): + ''' + Create a default configuration from passed environment parameters. 
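+
+        For illustration: the API URL and auth token gathered from
+        answers.conf / CLI params (PROVIDER_API_KEY, PROVIDER_AUTH_KEY)
+        are handed to KubeConfig.from_params(), which returns a minimal
+        kubeconfig-style dict for the Client.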
+ ''' + + self._check_required_params(exception=True) + paramdict = self._build_param_dict() + + logger.debug("Building from required params") + # Generate the configuration from the paramters + config = KubeConfig().from_params(api=paramdict[PROVIDER_API_KEY], + auth=paramdict[PROVIDER_AUTH_KEY], + ca=paramdict[PROVIDER_CA_KEY], + verify=paramdict[PROVIDER_TLS_VERIFY_KEY]) + logger.debug("Passed configuration for .kube/config %s" % config) + return config + + def _check_namespaces(self): + ''' + This function checks to see whether or not the namespaces created in the cluster match the + namespace that is associated and/or provided in the deployed application + ''' + + # Get the namespaces and output the currently used ones + namespace_list = self.api.namespaces() + logger.debug("There are currently %s namespaces in the cluster." % str(len(namespace_list))) + + # Create a namespace list + namespaces = [] + for ns in namespace_list: + namespaces.append(ns["metadata"]["name"]) + + # Output the namespaces and check to see if the one provided exists + logger.debug("Namespaces: %s" % namespaces) + if self.namespace not in namespaces: + msg = "%s namespace does not exist. Please create the namespace and try again." % self.namespace + raise ProviderFailedException(msg) def _process_artifacts(self): """ - Parse OpenShift manifests files and checks if manifest under - process is valid. Reads self.artifacts and saves parsed artifacts - to self.openshift_artifacts + Parse each Kubernetes file and convert said format into an Object for + deployment. """ for artifact in self.artifacts: logger.debug("Processing artifact: %s", artifact) data = None + + # Open and parse the artifact data with open(os.path.join(self.path, artifact), "r") as fp: data = anymarkup.parse(fp, force_types=None) + # Process said artifacts self._process_artifact_data(artifact, data) def _process_artifact_data(self, artifact, data): @@ -467,384 +207,58 @@ def _process_artifact_data(self, artifact, data): artifact (str): Artifact name data (dict): Artifact data """ - # kind has to be specified in artifact + + # Check if kind exists if "kind" not in data.keys(): raise ProviderFailedException( "Error processing %s artifact. There is no kind" % artifact) + # Change to lower case so it's easier to parse kind = data["kind"].lower() - resource = self._kind_to_resource(kind) - - # check if resource is supported by apis - if resource not in self.oapi_resources \ - and resource not in self.kapi_resources: - raise ProviderFailedException( - "Unsupported kind %s in artifact %s" % (kind, artifact)) - - # process templates - if kind == "template": - processed_objects = self._process_template(data) - # add all processed object to artifacts dict - for obj in processed_objects: - obj_kind = obj["kind"].lower() - if obj_kind not in self.openshift_artifacts.keys(): - self.openshift_artifacts[obj_kind] = [] - self.openshift_artifacts[obj_kind].append(obj) - return - - # add parsed artifact to dict - if kind not in self.openshift_artifacts.keys(): - self.openshift_artifacts[kind] = [] - self.openshift_artifacts[kind].append(data) - - def _process_template(self, template): - """ - Call OpenShift api and process template. - Templates allow parameterization of resources prior to being sent to - the server for creation or update. Templates have "parameters", - which may either be generated on creation or set by the user. - - Args: - template (dict): template to process - - Returns: - List of objects from processed template. 
- """ - logger.debug("processing template: %s", template) - url = self._get_url(self._get_namespace(template), "processedtemplates") - return self.oc.process_template(url, template) - - def _kind_to_resource(self, kind): - """ - Converts kind to resource name. It is same logics - as in k8s.io/kubernetes/pkg/api/meta/restmapper.go (func KindToResource) - Example: - Pod -> pods - Policy - > policies - BuildConfig - > buildconfigs - - Args: - kind (str): Kind of the object - - Returns: - Resource name (str) (kind in plural form) - """ - singular = kind.lower() - if singular.endswith("status"): - plural = singular + "es" - else: - if singular[-1] == "s": - plural = singular - elif singular[-1] == "y": - plural = singular.rstrip("y") + "ies" - else: - plural = singular + "s" - return plural - - def _get_url(self, namespace, kind, name=None, params=None): - """ - Some kinds/resources are managed by OpensShift and some by Kubernetes. - Here we compose right url (Kubernets or OpenShift) for given kind. - If resource is managed by Kubernetes or OpenShift is determined by - self.kapi_resources/self.oapi_resources lists - Example: - For namespace=project1, kind=DeploymentConfig, name=dc1 result - would be http://example.com:8443/oapi/v1/namespaces/project1/deploymentconfigs/dc1 - - Args: - namespace (str): Kubernetes namespace or Openshift project name - kind (str): kind of the object - name (str): object name if modifying or deleting specific object (optional) - params (dict): query parameters {"key":"value"} url?key=value - - Returns: - Full url (str) for given kind, namespace and name - """ - url = None - - resource = self._kind_to_resource(kind) - if resource in self.oapi_resources: - url = self.openshift_api - elif resource in self.kapi_resources: - url = self.kubernetes_api + if kind not in self.oc_artifacts.keys(): + self.oc_artifacts[kind] = [] - url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource)) + # Fail if there is no metadata + if 'metadata' not in data: + raise ProviderFailedException( + "Error processing %s artifact. There is no metadata object" % artifact) - if name: - url = urljoin(url, name) + # Change to the namespace specified on init() + data['metadata']['namespace'] = self.namespace - if params: - params["access_token"] = self.access_token + if 'labels' not in data['metadata']: + data['metadata']['labels'] = {'namespace': self.namespace} else: - params = {"access_token": self.access_token} + data['metadata']['labels']['namespace'] = self.namespace - url = urljoin(url, "?%s" % urlencode(params)) - logger.debug("url: %s", url) - return url + self.oc_artifacts[kind].append(data) - def _parse_kubeconf(self, filename): - """" - Parse kubectl config file - - Args: - filename (string): path to configuration file (e.g. 
./kube/config) - - Returns: - dict of parsed values from config - - Example of expected file format: - apiVersion: v1 - clusters: - - cluster: - server: https://10.1.2.2:8443 - certificate-authority: path-to-ca.cert - insecure-skip-tls-verify: false - name: 10-1-2-2:8443 - contexts: - - context: - cluster: 10-1-2-2:8443 - namespace: test - user: test-admin/10-1-2-2:8443 - name: test/10-1-2-2:8443/test-admin - current-context: test/10-1-2-2:8443/test-admin - kind: Config - preferences: {} - users: - - name: test-admin/10-1-2-2:8443 - user: - token: abcdefghijklmnopqrstuvwxyz0123456789ABCDEF + def run(self): """ - logger.debug("Parsing %s", filename) - - with open(filename, 'r') as fp: - kubecfg = anymarkup.parse(fp.read()) - - try: - return self._parse_kubeconf_data(kubecfg) - except ProviderFailedException: - raise ProviderFailedException('Invalid %s' % filename) - - def _parse_kubeconf_data(self, kubecfg): + Deploys the app by given resource artifacts. """ - Parse kubeconf data. + logger.info("Deploying to OpenShift") - Args: - kubecfg (dict): Kubernetes config data - - Returns: - dict of parsed values from config - """ - url = None - token = None - namespace = None - tls_verify = True - ca = None - - current_context = kubecfg["current-context"] - - logger.debug("current context: %s", current_context) - - context = None - for co in kubecfg["contexts"]: - if co["name"] == current_context: - context = co - - if not context: - raise ProviderFailedException() - - cluster = None - for cl in kubecfg["clusters"]: - if cl["name"] == context["context"]["cluster"]: - cluster = cl - - user = None - for usr in kubecfg["users"]: - if usr["name"] == context["context"]["user"]: - user = usr - - if not cluster or not user: - raise ProviderFailedException() - - logger.debug("context: %s", context) - logger.debug("cluster: %s", cluster) - logger.debug("user: %s", user) - - url = cluster["cluster"]["server"] - token = user["user"]["token"] - if "namespace" in context["context"]: - namespace = context["context"]["namespace"] - if "insecure-skip-tls-verify" in cluster["cluster"]: - tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"] - elif "certificate-authority" in cluster["cluster"]: - ca = cluster["cluster"]["certificate-authority"] - - return {PROVIDER_API_KEY: url, - ACCESS_TOKEN_KEY: token, - NAMESPACE_KEY: namespace, - PROVIDER_TLS_VERIFY_KEY: tls_verify, - PROVIDER_CA_KEY: ca} - - def _set_config_values(self): - """ - Reads providerapi, namespace and accesstoken from answers.conf and - corresponding values from providerconfig (if set). - Use one that is set, if both are set and have conflicting values raise - exception. 
- - Raises: - ProviderFailedException: values in providerconfig and answers.conf - are in conflict - - """ - - # initialize result to default values - result = {PROVIDER_API_KEY: self.providerapi, - ACCESS_TOKEN_KEY: self.access_token, - NAMESPACE_KEY: self.namespace, - PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify, - PROVIDER_CA_KEY: self.provider_ca} - - # create keys in dicts and initialize values to None - answers = {} - providerconfig = {} - for k in result.keys(): - answers[k] = None - providerconfig[k] = None - - # get values from answers.conf - for k in result.keys(): - answers[k] = self.config.get(k) - - # get values from providerconfig - if self.config_file: - providerconfig = self._parse_kubeconf(self.config_file) - - # decide between values from answers.conf and providerconfig - # if only one is set use that, report if they are in conflict - for k in result.keys(): - if answers[k] is not None and providerconfig[k] is None: - result[k] = answers[k] - if answers[k] is None and providerconfig[k] is not None: - result[k] = providerconfig[k] - if answers[k] is not None and providerconfig[k] is not None: - if answers[k] == providerconfig[k]: - result[k] = answers[k] + for kind, objects in self.oc_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % (kind, artifact)) else: - msg = "There are conflicting values in %s (%s) and %s (%s)"\ - % (self.config_file, providerconfig[k], ANSWERS_FILE, - answers[k]) - logger.error(msg) - raise ProviderFailedException(msg) - - logger.debug("config values: %s" % result) - - # this items are required, they have to be not None - for k in [PROVIDER_API_KEY, ACCESS_TOKEN_KEY, NAMESPACE_KEY]: - if result[k] is None: - msg = "You need to set %s in %s" % (k, ANSWERS_FILE) - logger.error(msg) - raise ProviderFailedException(msg) - - # set config values - self.providerapi = result[PROVIDER_API_KEY] - self.access_token = result[ACCESS_TOKEN_KEY] - self.namespace = result[NAMESPACE_KEY] - self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY] - if result[PROVIDER_CA_KEY]: - # if we are in container translate path to path on host - self.provider_ca = os.path.join(Utils.getRoot(), - result[PROVIDER_CA_KEY].lstrip('/')) - else: - self.provider_ca = None + self.api.create(artifact, self.namespace) - def extract(self, image, src, dest, update=True): + def stop(self): + """Undeploys the app by given resource manifests. + Undeploy operation first scale down the replicas to 0 and then deletes + the resource from cluster. """ - Extract contents of a container image from 'src' in container - to 'dest' in host. 
+ logger.info("Undeploying from OpenShift") - Args: - image (str): Name of container image - src (str): Source path in container - dest (str): Destination path in host - update (bool): Update existing destination, if True - """ - if os.path.exists(dest) and not update: - return - cleaned_image_name = Utils.sanitizeName(image) - pod_name = '{}-{}'.format(cleaned_image_name, Utils.getUniqueUUID()) - container_name = cleaned_image_name - - # Pull (if needed) image and bring up a container from it - # with 'sleep 3600' entrypoint, just to extract content from it - artifact = { - 'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': { - 'name': pod_name - }, - 'spec': { - 'containers': [ - { - 'image': image, - 'command': [ - 'sleep', - '3600' - ], - 'imagePullPolicy': 'IfNotPresent', - 'name': container_name - } - ], - 'restartPolicy': 'Always' - } - } - - self.oc.deploy(self._get_url(self.namespace, 'Pod'), artifact) - try: - self._wait_till_pod_runs(self.namespace, pod_name, timeout=300) - - # Archive content from the container and dump it to tmpfile - tmpfile = '/tmp/atomicapp-{pod}.tar.gz'.format(pod=pod_name) - self.oc.execute( - self.namespace, pod_name, container_name, - 'tar -cz --directory {} ./'.format('/' + src), - outfile=tmpfile - ) - finally: - # Delete created pod - self.oc.delete(self._get_url(self.namespace, 'Pod', pod_name)) - - # Extract archive data - tar = tarfile.open(tmpfile, 'r:gz') - tar.extractall(dest) - - def _wait_till_pod_runs(self, namespace, pod, timeout=300): - """ - Wait till pod runs, with a timeout. - - Args: - namespace (str): Openshift namespace - pod (str): Pod name - timeout (int): Timeout in seconds. - - Raises: - ProviderFailedException on timeout or when the pod goes to - failed state. - """ - now = datetime.datetime.now() - timeout_delta = datetime.timedelta(seconds=timeout) - while datetime.datetime.now() - now < timeout_delta: - status = self.oc.get_pod_status(namespace, pod) - if status == 'running': - break - elif status == 'failed': - raise ProviderFailedException( - 'Unable to run pod for extracting content: ' - '{namespace}/{pod}'.format(namespace=namespace, - pod=pod)) - time.sleep(1) - if status != 'running': - raise ProviderFailedException( - 'Timed out to extract content from pod: ' - '{namespace}/{pod}'.format(namespace=namespace, - pod=pod)) + for kind, objects in self.oc_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % (kind, artifact)) + else: + self.api.delete(artifact, self.namespace) diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index 48db99cb..0212bd59 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -1,9 +1,28 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . 
+""" import logging -from atomicapp.constants import REQUIREMENT_FUNCTIONS +from atomicapp.constants import (LOGGER_DEFAULT, + REQUIREMENT_FUNCTIONS) from atomicapp.plugin import Plugin -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class Requirements: @@ -25,7 +44,6 @@ class Requirements: def __init__(self, config, basepath, graph, provider, dryrun): self.plugin = Plugin() - self.plugin.load_plugins() self.config = config self.basepath = basepath @@ -44,13 +62,10 @@ def run(self): def stop(self): self._exec("stop") - def uninstall(self): - self._exec("uninstall") - # Find if the requirement does not exist within REQUIREMENT_FUNCTIONS def _find_requirement_function_name(self, key): - logging.debug("Checking if %s matches any of %s" % - (key, REQUIREMENT_FUNCTIONS)) + logger.debug("Checking if %s matches any of %s" % + (key, REQUIREMENT_FUNCTIONS)) if key in REQUIREMENT_FUNCTIONS.keys(): return REQUIREMENT_FUNCTIONS[key] raise RequirementFailedException("Requirement %s does not exist." % key) @@ -63,13 +78,14 @@ def _exec(self, action): requirement_function = self._find_requirement_function_name(key_name) # Check to see if the function exists in the provider, - # if it does not: fail + # if it does not: warn the user try: requirement = getattr(self.provider, requirement_function) except AttributeError: - raise RequirementFailedException( - "Requirement %s does not exist within %s." % + logger.warning( + "Requirement %s does not exist within %s. Skipping." % (requirement_function, self.provider)) + continue # Run the requirement function requirement(req[key_name], action) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 7fc50866..022f3759 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -20,10 +20,10 @@ from __future__ import print_function import distutils.dir_util import os +import pwd import sys import tempfile import re -import collections import anymarkup import uuid import requests @@ -36,29 +36,19 @@ CACHE_DIR, EXTERNAL_APP_DIR, HOST_DIR, - WORKDIR, - ARTIFACTS_FOLDER) + LOGGER_COCKPIT, + LOGGER_DEFAULT, + WORKDIR) __all__ = ('Utils') -logger = logging.getLogger(__name__) +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) +logger = logging.getLogger(LOGGER_DEFAULT) class AtomicAppUtilsException(Exception): pass -# Following Methods(printStatus, printErrorStatus) -# are required for Cockpit or thirdparty management tool integration -# DONOT change the atomicapp.status.* prefix in the logger method. - - -def printStatus(message): - logger.info("atomicapp.status.info.message=" + str(message)) - - -def printErrorStatus(message): - logger.info("atomicapp.status.error.message=" + str(message)) - def find_binary(executable, path=None): """Tries to find 'executable' in the directories listed in 'path'. @@ -271,16 +261,17 @@ def run_cmd(cmd, checkexitcode=True, stdin=None): # we were asked not to. 
if checkexitcode: if ec != 0: - printErrorStatus("cmd failed: %s" % str(cmd)) # For cockpit + cockpit_logger.error("cmd failed: %s" % str(cmd)) raise AtomicAppUtilsException( "cmd: %s failed: \n%s" % (str(cmd), stderr)) return ec, stdout, stderr @staticmethod - def askFor(what, info): + def askFor(what, info, app_name): repeat = True desc = info["description"] + logger.debug(info) constraints = None if "constraints" in info: constraints = info["constraints"] @@ -288,12 +279,12 @@ def askFor(what, info): repeat = False if "default" in info: value = raw_input( - "%s (%s, default: %s): " % (what, desc, info["default"])) + "ANSWER => %s | %s (%s, default: %s): " % (app_name, what, desc, info["default"])) if len(value) == 0: value = info["default"] else: try: - value = raw_input("%s (%s): " % (what, desc)) + value = raw_input("ANSWER => %s | %s (%s): " % (app_name, what, desc)) except EOFError: raise @@ -306,22 +297,6 @@ def askFor(what, info): return value - @staticmethod - def update(old_dict, new_dict): - for key, val in new_dict.iteritems(): - if isinstance(val, collections.Mapping): - tmp = Utils.update(old_dict.get(key, {}), val) - old_dict[key] = tmp - elif isinstance(val, list) and key in old_dict: - res = (old_dict[key] + val) - if isinstance(val[0], collections.Mapping): - old_dict[key] = [dict(y) for y in set(tuple(x.items()) for x in res)] - else: - old_dict[key] = list(set(res)) - else: - old_dict[key] = new_dict[key] - return old_dict - @staticmethod def getAppId(path): # obsolete @@ -345,7 +320,8 @@ def getDockerCli(dryrun=False): @staticmethod def inContainer(): """ - Determine if we are running inside a container or not. + Determine if we are running inside a container or not. This is done by + checking to see if /host has been passed. Returns: (bool): True == we are in a container @@ -362,6 +338,21 @@ def getRoot(): else: return "/" + @staticmethod + def get_real_abspath(path): + """ + Take the user provided 'path' and return the real path to the resource + irrespective of the app running location either inside container or + outside. + + Args: + path (str): path to a resource + + Returns: + str: absolute path to resource in the filesystem. + """ + return os.path.join(Utils.getRoot(), path.lstrip('/')) + # generates a unique 12 character UUID @staticmethod def getUniqueUUID(): @@ -369,13 +360,22 @@ def getUniqueUUID(): return data @staticmethod - def loadAnswers(answers_file): + def loadAnswers(answers_file, format=None): if not os.path.isfile(answers_file): raise AtomicAppUtilsException( "Provided answers file does not exist: %s" % answers_file) logger.debug("Loading answers from file: %s", answers_file) - return anymarkup.parse_file(answers_file) + try: + # Try to load answers file with a specified answers file format + # or the default format. + result = anymarkup.parse_file(answers_file, format=format) + except anymarkup.AnyMarkupError: + # if no answers file format is provided and the answers file + # is not a JSON file, try to load it using anymarkup in a + # generic way. + result = anymarkup.parse_file(answers_file) + return result @staticmethod def copy_dir(src, dest, update=False, dryrun=False): @@ -388,12 +388,107 @@ def rm_dir(directory): distutils.dir_util.remove_tree(directory) @staticmethod - def getSupportedProviders(path): - providers = os.listdir(path + '/' + ARTIFACTS_FOLDER) - return providers + def getUidGid(user): + """ + Get the UID and GID of the specific user by grepping /etc/passwd unless + we are in a container. 
+ + Returns: + (int): User UID + (int): User GID + """ + + # If we're in a container we should be looking in the /host/ directory + if Utils.inContainer(): + os.chroot(HOST_DIR) + uid = pwd.getpwnam(user).pw_uid + gid = pwd.getpwnam(user).pw_gid + os.chroot("../..") + else: + uid = pwd.getpwnam(user).pw_uid + gid = pwd.getpwnam(user).pw_gid + + return int(uid), int(gid) + + @staticmethod + def setFileOwnerGroup(src): + """ + This function sets the correct uid and gid bits to a source + file or directory given the current user that is running Atomic + App. + """ + user = Utils.getUserName() + + # Get the UID of the User + uid, gid = Utils.getUidGid(user) + + logger.debug("Setting gid/uid of %s to %s,%s" % (src, uid, gid)) + + # chown the file/dir + os.chown(src, uid, gid) + + # If it's a dir, chown all files within it + if os.path.isdir(src): + for root, dirs, files in os.walk(src): + for d in dirs: + os.chown(os.path.join(root, d), uid, gid) + for f in files: + os.chown(os.path.join(root, f), uid, gid) + + @staticmethod + def getUserName(): + """ + Finds the username of the user running the application. Uses the + SUDO_USER and USER environment variables. If runnning within a + container, SUDO_USER and USER varibles must be passed for proper + detection. + Ex. docker run -v /:/host -e SUDO_USER -e USER foobar + """ + sudo_user = os.environ.get('SUDO_USER') + + if os.getegid() == 0 and sudo_user is None: + user = 'root' + elif sudo_user is not None: + user = sudo_user + else: + user = os.environ.get('USER') + return user + + @staticmethod + def getUserHome(): + """ + Finds the home directory of the user running the application. + If runnning within a container, the root dir must be passed as + a volume. + Ex. docker run -v /:/host -e SUDO_USER -e USER foobar + """ + logger.debug("Finding the users home directory") + user = Utils.getUserName() + incontainer = Utils.inContainer() + + # Check to see if we are running in a container. If we are we + # will chroot into the /host path before calling os.path.expanduser + if incontainer: + os.chroot(HOST_DIR) + + # Call os.path.expanduser to determine the user's home dir. + # See https://docs.python.org/2/library/os.path.html#os.path.expanduser + # Warn if none is detected, don't error as not having a home + # dir doesn't mean we fail. + home = os.path.expanduser("~%s" % user) + if home == ("~%s" % user): + logger.error("No home directory exists for user %s" % user) + + # Back out of chroot if necessary + if incontainer: + os.chroot("../..") + + logger.debug("Running as user %s. 
Using home directory %s for configuration data" + % (user, home)) + return home @staticmethod - def make_rest_request(method, url, verify=True, data=None): + def make_rest_request(method, url, verify=True, data=None, headers={}): """ Make HTTP request to url @@ -420,13 +515,16 @@ def make_rest_request(method, url, verify=True, data=None): try: if method.lower() == "get": - res = requests.get(url, verify=verify) + res = requests.get(url, verify=verify, headers=headers) elif method.lower() == "post": - res = requests.post(url, json=data, verify=verify) + res = requests.post(url, json=data, verify=verify, headers=headers) elif method.lower() == "put": - res = requests.put(url, json=data, verify=verify) + res = requests.put(url, json=data, verify=verify, headers=headers) elif method.lower() == "delete": - res = requests.delete(url, json=data, verify=verify) + res = requests.delete(url, json=data, verify=verify, headers=headers) + elif method.lower() == "patch": + headers.update({"Content-Type": "application/json-patch+json"}) + res = requests.patch(url, json=data, verify=verify, headers=headers) status_code = res.status_code return_data = res.json() diff --git a/build_run.sh b/build_run.sh index 382d0525..e13c29c4 100755 --- a/build_run.sh +++ b/build_run.sh @@ -1,11 +1,15 @@ #!/bin/bash WHAT=$1 +DISTRO=$2 # TODO sanity check that we got docker >= 1.6 -[ -z "${WHAT}" ] && echo "Need to provide a distro you want to build for (fedora|centos|rhel7|debian)" && exit -IMAGE_NAME=atomicapp-${WHAT} +[ -z "${WHAT}" ] && echo "Need to provide a source location to build from (git|pkgs)" && echo "usage: build_run.sh SRC_LOCATION DISTRO" && exit + +[ -z "${DISTRO}" ] && echo "Need to provide a distro you want to build for (fedora|centos|rhel7|debian)" && echo "usage: build_run.sh SRC_LOCATION DISTRO" && exit + +IMAGE_NAME=atomicapp-${WHAT}-${DISTRO} if [ -z "$USERNAME" ]; then echo "setting USERNAME to " `whoami` @@ -13,7 +17,7 @@ if [ -z "$USERNAME" ]; then fi echo docker build $USERNAME/$IMAGE_NAME -docker build --rm --tag $USERNAME/$IMAGE_NAME --file Dockerfile.${WHAT} . +docker build --rm --tag $USERNAME/$IMAGE_NAME --file Dockerfiles.${WHAT}/Dockerfile.${DISTRO} . #doesn't really make sense to run it #test diff --git a/docs/cli.md b/docs/cli.md index 97ecaf26..3d9f6021 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -1,5 +1,65 @@ # Atomic App Command Line Interface (CLI) +The Atomic App software allows for several actions to be applied to +specified applications. The four actions that exist today are briefly +described below. + +## CLI Commands + +`genanswers` +------------ +Will download and combine artifacts from the target application in a +temporary directory and then take the generated sample answers.conf +file and populate it in the users working directory. The temporary +directory is then cleaned up. + +`init` +---------- +Initialize a directory with an example Atomic App application using +the `centos/httpd` container image. This is a templated file structure including +Docker and Kubernetes artifact examples. + +`index` +--------- +Use an `index.yaml` file located within `~/.atomicapp/index.yaml` for outputting a +series of featured Nuleculized applications + +``` +ID VER PROVIDERS LOCATION +postgresql-atomicapp 1.0.0 {D,O,K} docker.io/projectatomic/postgresql-centos7-atomicapp +flask_redis_nulecule 0.0.1 {D,K} docker.io/projectatomic/flask-redis-centos7-atomicapp +redis-atomicapp 0.0.1 {D,O,K} docker.io/projectatomic/redis-centos7-atomicapp +... 
+```
+
+`fetch`
+-------
+Will download and combine artifacts from the target application and any
+dependent applications, including a sample answers.conf file, into a local
+directory for inspection and/or modification. This is the same for all providers.
+
+`run`
+-----
+Will run an application.
+
+| Provider      | Implementation |
+| ------------- | -------------- |
+| Docker        | Run application containers on local machine. |
+| Kubernetes    | Run requested application in Kubernetes target environment. |
+| Openshift     | Run requested application in OpenShift target environment. |
+| Marathon      | Run requested application in Marathon target environment. |
+
+`stop`
+------
+Will stop an application.
+
+| Provider      | Implementation |
+| ------------- | -------------- |
+| Docker        | Stop application containers on local machine. |
+| Kubernetes    | Stop requested application in Kubernetes target environment. |
+| Openshift     | Stop requested application in OpenShift target environment. |
+| Marathon      | Stop requested application in Marathon target environment. |
+
 ## Providers
 
 Providers may be specified using the `answers.conf` file or the
 `--provider <provider>` option.
diff --git a/docs/file_handling.md b/docs/file_handling.md
index 3e4f885f..ec0e15e5 100644
--- a/docs/file_handling.md
+++ b/docs/file_handling.md
@@ -1,6 +1,11 @@
-## Install
+## Fetch
 
-Installing an Atomic App means to download the artifacts and sample answerfile.
+Fetching an Atomic App means downloading its metadata files: the artifacts and
+a sample answerfile for the Atomic App. By default, it downloads the metadata
+files for the atomicapp to a directory of the form
+``/var/lib/atomicapp/<app-name>-<app-id>``. If needed,
+you can also specify a target directory to download the metadata for the
+Atomic App using the ``--destination`` option.
 
 ## Developing and Debugging
 
@@ -8,11 +13,31 @@ Image developers may run the root container and point to a Nulecule directory on
 
 ## Directories
 
-* `/tmp/<app-name>`: Host directory for temporary Nulecule files. May be overridden.
-* `/tmp/<app-name>/.workdir`: Host directory for artifact template files with variable substitution.
+* `/var/lib/atomicapp/<app-name>-<app-id>`: This is where an Atomic App
+  and its dependencies are fetched when fetching or running the Atomic App,
+  unless a specific destination is specified.
+* `/var/lib/atomicapp/<app-name>-<app-id>/external`:
+  External Atomic Apps, if any, for the given Atomic App are
+  fetched into the ``external`` directory inside the directory of
+  the Atomic App when fetching the Atomic App with its
+  dependencies or running the Atomic App.
 
 ## Artifact path
 
 Local path to an artifact file or a directory containing artifact files as its
 immediate children.
+
+## Runtime answers file
+
+When running an Atomic App, it asks the user for missing values for
+parameters defined in the Atomic App and its child Atomic Apps. This
+aggregated answers data is used to run the Atomic App, and is dumped
+to a file, ``answers.conf.gen``, in the Atomic App's directory, to be
+used later when stopping the Atomic App.
+
+## Rendered artifact files
+
+Artifact files are rendered with the runtime answers data alongside the original
+artifact files, but with the filenames prefixed with a `.` (dot) to make
+them hidden.
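+
+A minimal sketch of this rendering behavior is shown below, assuming simple
+`$param` substitution in artifact templates; the helper name and the example
+paths are hypothetical, not Atomic App's actual implementation:
+
+```python
+import os
+from string import Template
+
+def render_artifact(path, answers):
+    """Render one artifact template with the aggregated answers data and
+    write the result next to the original, dot-prefixed so it is hidden."""
+    with open(path) as f:
+        rendered = Template(f.read()).safe_substitute(answers)
+    dirname, basename = os.path.split(path)
+    hidden_path = os.path.join(dirname, '.' + basename)
+    with open(hidden_path, 'w') as f:
+        f.write(rendered)
+    return hidden_path
+
+# e.g. render_artifact('artifacts/kubernetes/hello-apache-pod.json',
+#                      {'image': 'centos/httpd', 'hostport': '80'})
+```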
+
diff --git a/docs/images/logo.png b/docs/images/logo.png
new file mode 100644
index 00000000..a28fc9c0
Binary files /dev/null and b/docs/images/logo.png differ
diff --git a/docs/nulecule.md b/docs/nulecule.md
new file mode 100644
index 00000000..255bee92
--- /dev/null
+++ b/docs/nulecule.md
@@ -0,0 +1,240 @@
+# Nulecule file
+
+Atomic App implements version `0.0.2` of the [Nulecule specification](https://github.com/projectatomic/nulecule/tree/master/spec).
+
+A `Nulecule` file can be written in either `json` or `yaml`.
+
+### Data types
+
+Common Name | `type`    | `format`    | Comments
+----------- | --------- | ----------- | --------------
+integer     | `integer` | `int32`     | signed 32 bits
+float       | `number`  | `float`     |
+string      | `string`  |             |
+byte        | `string`  | `byte`      |
+boolean     | `boolean` |             |
+date        | `string`  | `date`      | As defined by `full-date` - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14)
+dateTime    | `string`  | `date-time` | As defined by `date-time` - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14)
+password    | `string`  | `password`  | Used to hint UIs the input needs to be obscured.
+URL         | `URL`     | `URL`       | As defined by `URL` - [RFC3986 Section 1.1.3](https://tools.ietf.org/html/rfc3986#section-1.1.3)
+
+### Nulecule file schema
+
+#### Container Application Object
+This is the root object for the specification.
+
+Field Name | Type | Description
+---------- | :-----------------: | ------------
+id | `string` | **Required.** The machine readable id of the Container Application.
+specversion | `string` | **Required.** The semantic version string of the Container Application Specification used to describe the app. The value MUST be `"0.0.2"` (current version of the spec).
+metadata | `Metadata Object` | **Optional.** An object holding optional metadata related to the Container Application; this may include license information or human readable information.
+graph | `Graph Object` | **Required.** A list of dependent containerapps. Strings may either match a local sub directory or another containerapp-spec compliant containerapp image that can be pulled via docker.
+requirements|`Requirements Object`| **Optional.** A list of requirements of this containerapp.
+
+
+#### Metadata Object
+
+Metadata for the Container Application.
+
+##### Fields
+
+Field Name     | Type            | Description
+---------------|:---------------:| ------------
+name | `string` | **Optional.** A human readable name of the containerapp.
+appversion | `string` | **Optional.** The semantic version string of the Container Application.
+description | `string` | **Optional.** A human readable description of the Container Application. This may contain information for the deployer of the containerapp.
+license | `License Object`| **Optional.** The license information for the containerapp.
+arbitrary_data | `string` | **Optional.** Arbitrary `key: value` pair(s) of metadata. May contain nested objects.
+
+##### Metadata Object Example
+
+```yaml
+metadata:
+  name: myapp
+  appversion: 1.0.0
+  description: description of myapp
+  foo: bar
+  othermetadata:
+    foo: bar
+    files: file://path/to/local/file
+...
+```
+
+#### License Object
+
+License information for the Container Application.
+
+##### Fields
+
+Field Name | Type     | Description
+-----------|:--------:|---
+name | `string` | **Required.** The human readable license name used for the Container Application, no format imposed.
+url | `string` | **Optional.** A URL to the license used for the Container Application. MUST be in the format of a URL.
+
+##### License Object Example
+
+```yaml
+license:
+  - name: Apache 2.0
+    url: http://www.apache.org/licenses/LICENSE-2.0.html
+```
+
+#### Graph Object
+
+The graph is a list of items (containerapps) the Container Application depends on.
+
+##### Fields
+
+Field Name| Type | Description
+----------|:-----------------:|-------------
+name | `string` | **Required.** The name of the dependent Container Application.
+source | `docker://` | **Optional.** `docker://` source location of the Container Application, the source MUST be prefixed by `docker://`. If source is present, all other fields SHALL be ignored.
+params | `Params Object` | **Optional.** A list of `Params Objects` that contain provider specific information. If params is present, source field SHALL be ignored.
+artifacts | `Artifact Object` | **Optional.** A list of `Artifact Objects` that contain provider specific information. If artifacts is present, source field SHALL be ignored.
+
+##### Graph Item Object Example:
+
+```yaml
+graph:
+  - name: mariadb-centos7-atomicapp
+    source: docker://projectatomic/mariadb-centos7-atomicapp
+  ...
+```
+
+If no `artifacts` are specified, then an external Atomic App is pulled and installed from the `docker://` source.
+
+#### Parameters Object
+
+A list of Parameters the containerapp requires. Defaults may be set; otherwise user input is required.
+
+##### Fields
+
+Field Name  | Type | Description
+------------|:-----------------:|-------------
+name | `string` | **Required.** The name of the parameter.
+description | `string` | **Required.** A human readable description of the parameter.
+default | `string` | **Optional.** An optional default value for the parameter.
+
+##### Parameters Object Example:
+
+```yaml
+params:
+  - name: image
+    description: wordpress image
+    default: wordpress
+  ...
+```
+
+#### Requirements Object
+
+The list of requirements of the Container Application.
+
+Field Name | Type | Description
+---------------- | :-----------------------: | ------------
+persistentVolume | `Persistent Volume Object` | **Optional.** An object that holds an array of persistent volumes.
+
+#### Persistent Volume Object
+
+This describes a requirement for persistent, read-only or read-write storage that should be available to the containerapp at runtime. The name of this object MUST be `"persistentVolume"`.
+
+Despite the name, within __Kubernetes__ and __OpenShift__ this acts as a [PersistentVolumeClaim](http://kubernetes.io/v1.1/docs/user-guide/persistent-volumes.html).
+
+Persistent Volume is only available for the following providers: __kubernetes__
+
+##### Fields
+
+Field Name | Type | Description
+---------------- | :-------: | ------------
+name | `string` | **Required.** A name associated with the storage requirement.
+accessMode | `string` | **Required.** Must be either: __ReadWriteOnce__, __ReadOnlyMany__ or __ReadWriteMany__.
+size | `integer` | **Required.** Size of the volume claim.
+ +__Optionally,__ you may _inherit_ from another compatible provider. + +##### Artifacts Example: + +```yaml +graph: + ... + artifacts: + docker: + - file://artifacts/docker/hello-apache-pod_run + kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + openshift: + - inherit: + - kubernetes + ... +``` + +# Full example + +This is a full example of __all__ features of the Nulecule file. This is only used as an example and _does not necessarily work as intended_. + +```yaml +--- +specversion: 0.0.2 +id: helloworld + +metadata: + name: Hello World + appversion: 0.0.1 + description: Hello earth! + license: + - name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + foo: bar + othermetadata: + foo: bar + files: file://path/to/local/file + +graph: + - name: mariadb-centos7-atomicapp + source: docker://projectatomic/mariadb-centos7-atomicapp + + - name: helloapache-app + params: + - name: image + description: The webserver image + default: centos/httpd + - name: hostport + description: The host TCP port as the external endpoint + default: 80 + artifacts: + docker: + - file://artifacts/docker/hello-apache-pod_run + kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + openshift: + - inherit: + - kubernetes + marathon: + - file://artifacts/marathon/helloapache.json + +requirements: + - persistentVolume: + name: "var-log-httpd" + accessMode: "ReadWriteOnce" + size: 4 +``` diff --git a/docs/providers.md b/docs/providers.md index 2058095e..0726b49c 100644 --- a/docs/providers.md +++ b/docs/providers.md @@ -1,6 +1,6 @@ # Providers -This chapter includes linkes to documentation on how to use and configure the +This chapter includes links to documentation on how to use and configure the providers that are supported by Atomic App. The linked documentation will give you a short overview of all available providers and how to use them. diff --git a/docs/providers/docker/overview.md b/docs/providers/docker/overview.md index a519a087..925bdd03 100644 --- a/docs/providers/docker/overview.md +++ b/docs/providers/docker/overview.md @@ -30,9 +30,9 @@ answers.conf file. An example is below: namespace: mynamespace ``` -#### providerconfig +#### provider-config This communicates directly with the docker daemon on the host. It does -not use the `providerconfig` option. +not use the `provider-config` option. #### Configuration Value Defaults diff --git a/docs/providers/kubernetes/overview.md b/docs/providers/kubernetes/overview.md index 6fe9597a..f8cc8d07 100644 --- a/docs/providers/kubernetes/overview.md +++ b/docs/providers/kubernetes/overview.md @@ -28,22 +28,22 @@ section of the answers.conf file. An example is below: namespace: mynamespace ``` -#### providerconfig +#### provider-config -For Kubernetes the configuration file as specified by `providerconfig` +For Kubernetes the configuration file as specified by `provider-config` is optional. Hosts that have kubernetes set up and running on them -may not need a `providerconfig` to be specified because kubernetes +may not need a `provider-config` to be specified because kubernetes services are listening on default ports/addresses. However, if kubernetes was set up to listen on different ports, or you wish to connect to a remote kubernetes environment, then you will need to specify a location for a provider config file. 
-One example of specifying a `providerconfig` is below: +One example of specifying a `provider-config` is below: ``` [general] provider: kubernetes -providerconfig: /host/home/foo/.kube/config +provider-config: /home/foo/.kube/config ``` #### Configuration Value Defaults @@ -53,7 +53,7 @@ Table 1. Kubernetes default configuration values Keyword | Required | Description | Default value ---------|----------|---------------------------------------------------------|-------------- namespace| no | namespace to use with each kubectl call | default -providerconfig| no | config file that specifies how to connect to kubernetes | none +provider-config| no | config file that specifies how to connect to kubernetes | none ### Operations diff --git a/docs/providers/marathon/overview.md b/docs/providers/marathon/overview.md index 5a360252..f38b69ae 100644 --- a/docs/providers/marathon/overview.md +++ b/docs/providers/marathon/overview.md @@ -6,15 +6,15 @@ The Marathon provider will deploy an application into Mesos cluster using Marathon scheduler. ### Configuration -This provider requires configuration (`providerapi`) to be able to connect to Marathon API. -If no `providerapi` is specified it will use `http://localhost:8080` as Marathon API url. +This provider requires configuration (`provider-api`) to be able to connect to Marathon API. +If no `provider-api` is specified it will use `http://localhost:8080` as Marathon API url. This configuration can be provided in the `answers.conf` file. Example: [general] provider=marathon - providerapi=http://10.0.2.15:8080 + provider-api=http://10.0.2.15:8080 #### Configuration values @@ -22,7 +22,7 @@ Table 1. Marathon default configuration values Keyword | Required | Description | Default value ------------|----------|---------------------------------------------|-------------------------- -providerapi | no | url for Marathon REST API | `http://localhost:8080` +provider-api | no | url for Marathon REST API | `http://localhost:8080` ### Operations diff --git a/docs/providers/openshift/overview_atomic_app.md b/docs/providers/openshift/overview_atomic_app.md index 06a8624d..6793cdb9 100644 --- a/docs/providers/openshift/overview_atomic_app.md +++ b/docs/providers/openshift/overview_atomic_app.md @@ -11,8 +11,8 @@ the application. One piece of the puzzle is telling Atomic App how to communicate with the OpenShift master. This can be done in one of two ways. -1. Passing in the `providerconfig` value in answers.conf -2. Passing in both the `providerapi` and `accesstoken` values in answers.conf. +1. Passing in the `provider-config` value in answers.conf +2. Passing in both the `provider-api` and `provider-auth` values in answers.conf. These config items are detailed below. @@ -34,27 +34,27 @@ namespace: mynamespace **NOTE**: If there is a namespace value set in the artifact metadata then that value will always be used and won't be overridden. -#### providerconfig +#### provider-config For OpenShift, one way to let Atomic App know how to communicate with the master is by re-using the provider config file that already exists on a user's machine. Basically whatever the user can do with the `oc` command, Atomic App can do by re-using the same provider config. 
-One example of specifying a `providerconfig` is below:
+One example of specifying a `provider-config` is below:
 
 ```
 [general]
 provider = openshift
-providerconfig = /home/user/.kube/config
+provider-config = /home/user/.kube/config
 ```
 
-#### providerapi + accesstoken
+#### provider-api + provider-auth
 
 Another way to pass credential information is by passing in both the
 location where the openshift API is being served, as well as the
 access token that can be used to authenticate. These are done with the
-`providerapi` and `accesstoken` config variables in the `[general]`
+`provider-api` and `provider-auth` config variables in the `[general]`
 section within answers.conf.
 
 An example of this is below:
@@ -62,8 +62,8 @@ An example of this is below:
 
 ```
 [general]
 provider = openshift
-providerapi = https://10.1.2.2:8443
-accesstoken = sadfasdfasfasfdasfasfasdfsafasfd
+provider-api = https://10.1.2.2:8443
+provider-auth = sadfasdfasfasfdasfasfasdfsafasfd
 namespace = mynamespace
 ```
 
@@ -71,29 +71,29 @@ namespace = mynamespace
 `oc whoami -t` or if you are not using `oc` client you can get it via
 web browser on `https://<openshift-master>/oauth/token/request`
 
-#### providertlsverify
-If `providerapi` is using https protocol you can optionally
+#### provider-tlsverify
+If `provider-api` is using the https protocol you can optionally
 disable verification of tls/ssl certificates. This can be especially
 useful when using self-signed certificates.
 
 ```
 [general]
 provider = openshift
-providerapi = https://127.0.0.1:8443
-accesstoken = sadfasdfasfasfdasfasfasdfsafasfd
+provider-api = https://127.0.0.1:8443
+provider-auth = sadfasdfasfasfdasfasfasdfsafasfd
 namespace = mynamespace
-providertlsverify = False
+provider-tlsverify = False
 ```
 
-**NOTE**: If `providerconfig` is used values of `providertlsverify`
-and `providercafile` are set according to settings in `providerconfig` file.
+**NOTE**: If `provider-config` is used, values of `provider-tlsverify`
+and `provider-cafile` are set according to settings in the `provider-config` file.
 
-#### providercafile
-If `providerapi` is using https protocol you can optionally specify
+#### provider-cafile
+If `provider-api` is using the https protocol you can optionally specify
 path to a CA_BUNDLE file or directory with certificates of trusted CAs.
 
-**NOTE**: If `providerconfig` is used values of `providertlsverify`
-and `providercafile` are set according to settings in `providerconfig` file.
+**NOTE**: If `provider-config` is used, values of `provider-tlsverify`
+and `provider-cafile` are set according to settings in the `provider-config` file.
 
 #### Configuration Value Defaults
 
@@ -103,15 +103,15 @@ Table 1.
OpenShift default configuration values Keyword | Required | Description | Default value ---------|----------|---------------------------------------------------------|-------------- namespace| no | namespace to use with each kubectl call | default -providerconfig| no | config file that specifies how to connect to kubernetes | none -providerapi| no | the API endpoint where API requests can be sent | none -accesstoken| no | the access token that can be used to authenticate | none -providertlsverify|no| turn off verificatoin of tls/ssl certificates | False -providercafile| no | path to file or directory with trusted CAs | none +provider-config| no | config file that specifies how to connect to kubernetes | none +provider-api| no | the API endpoint where API requests can be sent | none +provider-auth| no | the access token that can be used to authenticate | none +provider-tlsverify|no| turn off verificatoin of tls/ssl certificates | False +provider-cafile| no | path to file or directory with trusted CAs | none -**NOTE**: One of `providerconfig` or `providerapi` + `accesstoken` are required +**NOTE**: One of `provider-config` or `provider-api` + `provider-auth` are required -**NOTE**: Namespace can be set in the file pointed to by `providerconfig` or +**NOTE**: Namespace can be set in the file pointed to by `provider-config` or in the `answers.conf`. If it is set in both places then the values must match, or an error will be reported. diff --git a/docs/providers/openshift/overview_native.md b/docs/providers/openshift/overview_native.md index a5fc56cf..f214a999 100644 --- a/docs/providers/openshift/overview_native.md +++ b/docs/providers/openshift/overview_native.md @@ -20,7 +20,7 @@ from the environment of the installation container that is used to bootstrap the start of the application. It is not necessary to provide the namespace in the config. -#### providerconfig / providerapi / access_token +#### provider-config / provider-api / provider-auth At the time of execution, the Atomic App container is already running inside of the openshift environment and has access to the credentials diff --git a/docs/quick_start.md b/docs/quick_start.md new file mode 100644 index 00000000..92e61648 --- /dev/null +++ b/docs/quick_start.md @@ -0,0 +1,153 @@ +## Using Atomic App + +Prerequisite: Before proceeding, make sure you either have a Kubernetes, OpenShift or Docker environment setup and ready. + +You can either use Atomic App on your own OS or in a container via the `atomic` command on Atomic hosts. + +In order to use Atomic App on Project Atomic hosts we use the `INSTALL` and `RUN` label functionality with [atomic cli](https://github.com/projectatomic/atomic). + +With the exception of the `atomic stop` command all functionality is essentially the same. 
+ +### Quickstart: Atomic App on bare metal + +__Running Apache on Docker:__ +```sh +▶ sudo atomicapp run projectatomic/helloapache --provider=docker +2016-02-25 16:06:38,298 - [INFO] - main.py - Action/Mode Selected is: run +2016-02-25 16:06:38,299 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to /var/lib/atomicapp/projectatomic-helloapache-7c32c1632a7b +2016-02-25 16:06:41,904 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-02-25 16:06:41,904 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to /var/lib/atomicapp/projectatomic-helloapache-7c32c1632a7b +20af2e6e33d10d26aa98d6e63c70de5fd55bfe14b9cc782e1312afe441ef7130 +2016-02-25 16:06:42,231 - [INFO] - docker.py - Deploying to provider: Docker +5d6938439d50c21251507b26c73f5e65f102f2b99e183002ef2ec21414c4ee78 + +Your application resides in /var/lib/atomicapp/projectatomic-helloapache-7c32c1632a7b +Please use this directory for managing your application + +▶ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +5d6938439d50 centos/httpd "/run-httpd.sh" 3 seconds ago Up 1 seconds 0.0.0.0:80->80/tcp default_centos-httpd_ec75a2fe2a50 +``` + +__Runnning Apache on Kubernetes:__ +```sh +▶ sudo atomicapp run projectatomic/helloapache +2016-02-25 15:03:04,341 - [INFO] - main.py - Action/Mode Selected is: run +2016-02-25 15:03:04,343 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to /var/lib/atomicapp/projectatomic-helloapache-c0dd79b5e757 +2016-02-25 15:03:07,983 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-02-25 15:03:07,984 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to /var/lib/atomicapp/projectatomic-helloapache-c0dd79b5e757 +886e10a3244f982f3302ab9058ab7b377c6f83e2cf63f001e1ba011358d0b471 +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - Using namespace default +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - trying kubectl at /usr/bin/kubectl +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - trying kubectl at /usr/local/bin/kubectl +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - found kubectl at /usr/local/bin/kubectl +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - Deploying to Kubernetes +... + +Your application resides in /var/lib/atomicapp/projectatomic-helloapache-c0dd79b5e757 +Please use this directory for managing your application + +▶ kubectl get po +NAME READY STATUS RESTARTS AGE +helloapache 1/1 Running 0 2m +k8s-etcd-127.0.0.1 1/1 Running 0 1d +k8s-master-127.0.0.1 4/4 Running 0 1d +k8s-proxy-127.0.0.1 1/1 Running 0 1d +``` + +__Fetch, edit and run Apache on Kubernetes:__ +```sh +▶ mkdir ./localdir + +▶ sudo atomicapp fetch projectatomic/helloapache --destination ./localdir/ +2016-02-25 15:35:41,439 - [INFO] - main.py - Action/Mode Selected is: fetch +2016-02-25 15:35:41,440 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to helloapache +2016-02-25 15:35:45,067 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-02-25 15:35:45,067 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to helloapache +c12d2047fab44f2906b9cbee3ac86c6c6499921ce33a90085e8765491b44f447 + +Your application resides in localdir +Please use this directory for managing your application + +▶ cd localdir + +▶ cat Nulecule +... + - name: hostport + description: The host TCP port as the external endpoint + default: 80 +... 
+ +▶ vim Nulecule # edit port 80 to 8080 + +▶ cat Nulecule +... + - name: hostport + description: The host TCP port as the external endpoint + default: 8080 +... + +▶ sudo atomicapp run . + +OR + +▶ docker build -t myapp +▶ sudo atomicapp run myapp +``` + +### Quickstart: Atomic App on Atomic Host + +__Running Apache on Docker:__ +```sh +▶ sudo atomic run projectatomic/helloapache --provider=docker +docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache run --provider=docker +docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache run --provider=docker +2016-03-01 20:54:37,617 - [INFO] - main.py - Action/Mode Selected is: run +2016-03-01 20:54:37,618 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-a68057164f09 +2016-03-01 20:54:38,357 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-03-01 20:54:38,358 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-a68057164f09 +6eedd332f9938c7b4bacca694fdc77309ca5b43aabb05a1cb644ff8a0b713012 +2016-03-01 20:54:38,558 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:54:38,558 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:54:38,602 - [INFO] - docker.py - Deploying to provider: Docker +a98d9a3305496803c38a90a9ef65c52030dc23dae4b04f36ce167ff98335395f + +Your application resides in /var/lib/atomicapp/projectatomic-helloapache-a68057164f09 +Please use this directory for managing your application +``` + +__Runnning Apache on Kubernetes:__ +```sh +▶ sudo atomic run projectatomic/helloapache +docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache run +docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache run +2016-03-01 20:58:03,396 - [INFO] - main.py - Action/Mode Selected is: run +2016-03-01 20:58:03,397 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +2016-03-01 20:58:04,153 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-03-01 20:58:04,153 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +c85cbb2d28857f2b283e23a72a70e077daceeb2b72f6964605af6f7efa8fbc2f +2016-03-01 20:58:04,387 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:58:04,388 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - Using namespace default +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - trying kubectl at /host/usr/bin/kubectl +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - trying kubectl at /host/usr/local/bin/kubectl +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - found kubectl at /host/usr/local/bin/kubectl 
+2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - Deploying to Kubernetes + +Your application resides in /var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +Please use this directory for managing your application +``` + +__Stopping Apache on Kubernetes:__ +```sh +▶ sudo atomic stop projectatomic/helloapache /var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache stop /var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +2016-03-01 20:59:57,067 - [INFO] - main.py - Action/Mode Selected is: stop +2016-03-01 20:59:57,075 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:59:57,075 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - Using namespace default +2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - trying kubectl at /host/usr/bin/kubectl +2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - trying kubectl at /host/usr/local/bin/kubectl +2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - found kubectl at /host/usr/local/bin/kubectl +2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - Undeploying from Kubernetes +``` diff --git a/docs/spec/GETTING_STARTED.md b/docs/spec/GETTING_STARTED.md new file mode 100644 index 00000000..a24f5554 --- /dev/null +++ b/docs/spec/GETTING_STARTED.md @@ -0,0 +1,90 @@ +# Getting Started with Nulecule + +You have an application you want to package up as a Nulecule for distribution. It's composed of one or more containers that together link to provide your application. + +## Plan +1. Determine what components of your application are custom and which are "stock" parts. For example, do you need a custom web server, or do you just need to load a specific configuration onto an already packaged web server. +1. Find your resources. + - **nulecule applications** Are there existing Nulecule Applications you can leverage in your own application? + - **container images** Carefully consider if you really need to build your own containers. For example, do you really need your own web server or database image? If you're writing a Dockerfile for a common service, try to find a well-known, supported, certified, stable image that you can build on. + - **provider orchestration templates** When you are considering how to provide configuration for orchestration providers, such as kubernetes files (service, replication controller, pod) or OpenShift or Docker Compose files, see if you can use exising templates or known good files. As with container images, if you're writing files for common services, try to find well-known, supported, certified, stable templates that you can build on. + +## Prepare +From the planning phase, you've got a collection of remote and local sources that your application will be comprised of. + +1. Start with the containers. Understand how they run standalone. Get them running. Make sure the entire application runs manually. +1. Orchestrate the containers on the target provider. Start simply and build up. For example, with kubernetes just deploy as a pod. Once that succeeds, add a service, and then some replication controllers. There are many opportunities for error -- so make small changes, test and iterate slowly. Verify your [YAML](http://codebeautify.org/yaml-validator) or [JSON](http://jsonlint.com/) frequently. 
Use a method that can be easily incorporated into your development workflow: small change -> save -> validate -> test -> rinse and repeat. +1. Test both custom and stock services together. Nulecule won't do magical things. The pieces must all work together before they can be packaged up as a unit. + +## Package +Only when everything is working are you ready to package the application. In this phase you'll be interacting with the [Nulecule specification](/spec). + +1. Download a [Nulecule template](/spec/examples/template) to start from. +1. In the Nulecule file, create one or more lists of things under `graph`. These represent the different components that make up your application. Names are arbitrary. Remember to verify your [YAML](http://codebeautify.org/yaml-validator) or [JSON](http://jsonlint.com/) frequently. + + 1. If your sources are remote, then all that is needed is a name and source. Remote sources are other Nulecule applications. + + graph: + - name: mydb + source: "docker://registry.example.com/some/database" + 1. If your sources are local, then provide a name and an artifacts key that will reference the source file(s). Each provider will have a key specifying the provider. For example, "docker" or "kubernetes". + + graph: + - name: myapp + artifacts: + kubernetes: + - file:///artifacts/kubernetes/pod.json + - file:///artifacts/kubernetes/service.json + +1. Put all of the provider files into a directory structure that corresponds to the provider artifacts section in the Nulecule file. Using the above example, `artifacts/kubernetes/.json`. The structure should resemble something like this: + + ├── Dockerfile + ├── artifacts + │   └── kubernetes + │   ├── pod.json + │   └── service.json + ├── Nulecule + └── README.md + +1. Consider the different ways your application may be deployed. There will likely be many parameters that need to be exposed at deployment. It's best to overdo this and provide defaults whenever possible. Go through the provider files and change any values. For example `database_pass: changeme` becomes `database_pass: $db_pass`. The name of the parameter is `db_pass`. These go into the params section of the Nulecule file under each provider. For example: + + + graph: + - mydb: + ... + params: + - name: db_pass + description: database passphrase + - name: port + description: frontend TCP port + default: 80 + +1. Consider any additional information that is useful for deployment. Write a README file focused on deployment. Use a popular format such as Markdown or asciidoc so it can be read from a terminal window or rendered in a graphical interface. + * what does this application do? + * what provider environment(s) do I need to have setup before I deploy it? + * how do I verify that it has been deployed correctly? + +1. Add a metadata section, including a name, description and license information. Arbitrary metadata may also be added. Consider using keyword tags that may be useful for deployment management. For example: + + metadata: + name: My Cool App + appversion: 1.0.0 + description: Lorem ipsum dolor sit amet, consectetur adipiscing elit + license: + name: GPLv3 + url: http://www.example.com/license + tags: + - foo + - bar + +1. Before packaging up into a container, try running it in a test mode if one is provided by your Nulecule implementation. If you are using the [Atomic App reference implementation](https://github.com/projectatomic/atomicapp), use the `dry-run` and `verbose` options as follows: `atomicapp --dry-run --verbose run`. 
This should output the commands that will run. Common errors:
+  * provider files don't match the artifact relative path
+  * yaml or json is not valid
+  * missing parameter
+
+1. Once the Nulecule file and provider artifacts are working, package the application as a container. Typically, this means basing it off of an executable image provided by the implementation of Nulecule you are using. If you are using the [Atomic App reference implementation](https://github.com/projectatomic/atomicapp), the stock Dockerfile may be used, unaltered, unless you have a special use case.
+
+       [sudo] docker build -t mydb-app .
+
+## Push & Pull
+Push the image to a registry. Tell people about it and see if they can deploy your application without any assistance. If they have questions, you probably should enhance the application and parameter descriptions so they are clear.
diff --git a/docs/spec/GLOSSARY.md b/docs/spec/GLOSSARY.md
new file mode 100644
index 00000000..ad89c4fe
--- /dev/null
+++ b/docs/spec/GLOSSARY.md
@@ -0,0 +1,13 @@
+# Nulecule Glossary
+
+* __Container Image__ - Platform-agnostic term referring to Docker, Rkt or other packaging and transport protocols
+* __Layered Image__ - The foundation image of a container plus other tools, applications and content added
+* __Association__ of container images to the multi-container Nulecule application:
+  + __Aggregation__ of one or more discrete container images integral to the operation and coupled to the lifecycle of the Nulecule application - can be another Nulecule Application or container image reference
+  + __Composition__ refers to one or more container images that are required and tightly coupled to the Nulecule application - can be another Nulecule Application or container image reference
+* __Include__ - Refers to the ability to include common resources, parameters or definitions needed to deploy onto an orchestration provider. For example, an OpenShift provider may include the kubernetes provider artifacts and add OpenShift functionality on top of kubernetes capabilities.
+* __Provider__ - Plugin interface for a specific deployment platform, an orchestration provider
+* __Dependency Management__ - Refers to the ability to define order of deployment and managed dependencies, including configurable parameters layered on top of stock container images, as well as the providers included in the application definition
+* __Directed Graph__ - Declarative representation of dependencies in the context of a multi-container Nulecule application
+* __Parameters__ - Variables that can have default values and can be overridden by answerfile.conf
+
diff --git a/docs/spec/IMPLEMENTATION_GUIDE.md b/docs/spec/IMPLEMENTATION_GUIDE.md
new file mode 100644
index 00000000..e6226fb4
--- /dev/null
+++ b/docs/spec/IMPLEMENTATION_GUIDE.md
@@ -0,0 +1,34 @@
+# Implementation Guide
+
+This specification has been fully described in the [schema.json](/spec/schema.json) file. Developer and deployment tools should be implemented using this file.
+
+## Developer Tools
+
+Developer tooling helps application developers or designers get going quickly. Tools may be template-based or wizard-style tools, command line or graphical interface. When creating a tool for developers, decide how much assistance you want to expose for the providers. Each provider has its own documentation and potential tooling, but integrating provider features can be a big help in getting something working quickly.
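+
+As a hedged illustration of such schema-driven tooling, a validator can load
+`schema.json` and check a Nulecule file against it. The file paths and the
+`check_nulecule` helper below are assumptions for this sketch, not part of any
+shipped tool; it relies on the `jsonschema` and `anymarkup` libraries:
+
+```python
+import json
+
+import anymarkup                       # reads both YAML and JSON
+from jsonschema import validate, ValidationError
+
+def check_nulecule(nulecule_path, schema_path='spec/schema.json'):
+    """Validate a Nulecule file against the machine-readable spec."""
+    with open(schema_path) as f:
+        schema = json.load(f)
+    data = anymarkup.parse_file(nulecule_path)
+    try:
+        validate(data, schema)
+    except ValidationError as err:
+        print("Invalid Nulecule file: %s" % err.message)
+        return False
+    return True
+```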
+
+Wizard-style tools that generate the files for an application require these fields for input:
+
+* name
+* description
+* version (application)
+
+Each "application" component the user wants to define will compose the "graph" for the Nulecule. A component may either be a remote application or defined locally in the directory structure.
+
+**Remote applications**
+
+Remote applications are other nulecule container images, for example `someuser/mariadb-app`. No other information is needed.
+
+**Local applications**
+
+Local applications are defined by a directory in the graph. These fields are required for input:
+
+* application name: this is added to the Nulecule graph and creates a directory in the graph.
+* provider: a subdirectory of the application directory
+
+**Providers**
+
+Provider files may be generated based on some templates. Providing a mechanism to parameterize these files helps the developer understand how parameterization works. For example, if a set of kubernetes template files is pulled in, allowing the developer to parameterize some values in the pod file would update the pod file and create a `key = value` pair in the application section of the `params.conf` file. For required values without defaults, set the value to `None` in `params.conf`. With this example as a starting point, the developer can then easily manipulate parameters by manually editing the files based on the demonstrated pattern.
+
+## Runtime Tools
+
+The reference implementation, Atomic App, written in Python, is located at: https://github.com/projectatomic/atomicapp
diff --git a/docs/spec/LIFECYCLE.md b/docs/spec/LIFECYCLE.md
new file mode 100644
index 00000000..886612b3
--- /dev/null
+++ b/docs/spec/LIFECYCLE.md
@@ -0,0 +1,54 @@
+# Lifecycle of the Specification
+
+This document and the processes it describes will become effective starting with Nulecule Specification 0.0.2. It is valid until replaced by a newer version or noted otherwise.
+
+## Normative Document
+
+The normative Nulecule Specification document will be published at http://www.projectatomic.io/nulecule/spec/<version>/
+Versioning follows the [semantic versioning scheme](http://semver.org/spec/v2.0.0.html).
+
+In addition to the human readable HTML document, a JSON formatted machine readable version of the specification will be published at the same URL path as the HTML document. The document name will be schema.json and may reference other files using JSON Draft 4 references.
+
+The normative machine readable Nulecule Specification document will be published at https://github.com/projectatomic/nulecule/blob/v<version>/spec/<version>/schema.json
+
+## States
+
+The Nulecule Specification will have a certain set of releases; we will use semantic versioning to identify the releases.
+Prior to each release there will be a draft version of the release. This will be used to work/collaborate on the spec itself.
+
+## Contributors and release process
+
+Everybody is welcome to contribute to the draft version of the upcoming release. This will be documented by pull
+requests (to the github repository of the Nulecule Specification) against the draft of the specification. Once a draft
+has stabilized, it will be prepared for release by the specification maintainers. The maintainers
+will then publish the new release of the specification.
+
+### Changes to a Release
+
+Changes to released versions of the specification will not change the structure or feature set of the specification.
+They are only meant to fix spelling or language errors, or to add or correct examples.
+ +Collaboration on the draft of the next release of the Nulecule Specification will be done on the master branch of the GitHub +repository of the Nulecule Specification. The release task itself is rather short: the maintainers will tag the repository +and provide the human and machine readable versions of the normative documents. + +## Release tasks + +This chapter will walk you through the steps to be taken to + + * prepare a draft - so that the community can work on it + * release - so that a new version of the spec is created + +### prepare a draft + +Assuming, for example, that the current version of the spec is 0.5.0, collaboration on the specification will continue on the master branch +of https://github.com/projectatomic/nulecule + +### release (move from draft to new version) + +This will bring the draft version of the spec to a released version of the spec: `git tag v0.6.0 -m 'v0.6.0'`. After that, one of the maintainers will +publish the human and machine readable files to http://projectatomic.io/nulecule/spec/0.6.0/ + +## Maintainers + +Please see the MAINTAINERS file for a list of maintainers of the Nulecule Specification. diff --git a/docs/spec/NULECULE_FILE.md b/docs/spec/NULECULE_FILE.md new file mode 100644 index 00000000..8a7e0d16 --- /dev/null +++ b/docs/spec/NULECULE_FILE.md @@ -0,0 +1,375 @@ +# Container Application Specification + +**NOTE**: This is a work-in-progress effort that is expected to change quickly. Feel free to join the initiative! + +#### Version 0.0.2 + +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](http://www.ietf.org/rfc/rfc2119.txt). + +The Container Application Specification is licensed under [GNU Free Documentation License Version 1.3, 3 November 2008](https://www.gnu.org/copyleft/fdl.html). + +## Introduction + +The Container Application specification is a project to describe 'an Application' that is composed of a set of dependent Container Applications (containerapp). The Container Application specification defines a set of files required to describe such a containerapp. These files can then be used by other tools to deploy a containerapp. Developers may use other tools to generate most of the required containerapp files. Additional utilities can also take advantage of the resulting files, such as testing tools. + +### Versioning + +Within this specification we follow [the semantic versioning pattern](http://semver.org/spec/v2.0.0.html). + +## Revision History + +Version | Date | Notes +--- | --- | --- +0.0.2 | 2015-05-07 | close issue #35 the graph is now a list of named items +0.0.1-alpha | 2015-mm-dd | TBD +v1-alpha | 2015-04-10 | reversioned to 0.0.1-alpha + +## Examples + +For a list of examples that conform to the spec, check out [github.com/projectatomic/nulecule-library](https://github.com/projectatomic/nulecule-library). + + +## Specification + +### Format + +The files describing a containerapp in accordance with the Container Application Specification are represented using [YAML 1.2](http://www.yaml.org/spec/1.2/spec.html) or [JSON](http://json.org/). + +All field names in the specification are **case sensitive**. + +By convention, the containerapp definition file is named `Nulecule`. The Nulecule is the primary file defining the containerapp and its relationship to dependencies.
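+ +To orient the reader before the schema details, here is a minimal sketch of a `Nulecule` file; the id, component name, and artifact path are hypothetical placeholders, and the full schema for each field is described in the sections below: + +```yaml +--- +specversion: 0.0.2 # MUST be "0.0.2" for this version of the spec +id: mydb-app # hypothetical machine readable id + +metadata: + name: My Database App # optional human readable name + +graph: + - name: mydb-app # hypothetical component name + artifacts: + kubernetes: + - file://artifacts/kubernetes/mydb-pod.json # hypothetical artifact path +```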
+ +### Data Types + +Primitive data types in the Container Application Specification are based on the types supported by the [JSON-Schema Draft 4](http://json-schema.org/latest/json-schema-core.html#anchor8). + +The formats defined by the Container Application Specification are: + +Common Name | [`type`](#dataTypeType) | [`format`](#dataTypeFormat) | Comments +----------- | ------ | -------- | -------- +integer | `integer` | `int32` | signed 32 bits +float | `number` | `float` | +string | `string` | | +byte | `string` | `byte` | +boolean | `boolean` | | +date | `string` | `date` | As defined by `full-date` - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) +dateTime | `string` | `date-time` | As defined by `date-time` - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) +password | `string` | `password` | Used to hint to UIs that the input needs to be obscured. +URL | `URL` | `URL` | As defined by `URL` - [RFC3986 Section 1.1.3](https://tools.ietf.org/html/rfc3986#section-1.1.3) + +### Terminology + +Container Application + +Provider + + +### Schema + +#### Container Application Object + +This is the root object for the specification. + +##### Fields + +Field Name | Type | Description +---|:---:|--- +id | `string` | **Required.** The machine readable id of the Container Application. +specversion | `string` | **Required.** The semantic version string of the Container Application Specification used to describe the app. The value MUST be `"0.0.2"`. +metadata | [ [MetadataObject](#metadataObject) ] | **Optional** An object holding optional metadata related to the Container Application; this may include license information or human readable information. +params | [ [ParamsObject](#paramsObject) ] | **Optional** A list of [ParamsObject](#paramsObject) that contain provider specific information. +graph | [ [GraphObject](#graphObject) ] | **Required.** A list of depending containerapps. Strings may either match a local subdirectory or another containerapp-spec compliant containerapp image that can be pulled via a provider. +requirements | [ [RequirementsObject](#requirementsObject) ] | **Optional** A list of requirements of this containerapp. + + +#### Metadata Object + +Metadata for the Container Application. + +##### Fields + +Field Name | Type | Description +---|:---:|--- +name | `string` | **Optional** A human readable name of the containerapp. +appversion | `string` | **Optional** The semantic version string of the Container Application. +description | `string` | **Optional** A human readable description of the Container Application. This may contain information for the deployer of the containerapp. +license | [License Object](#licenseObject) | **Optional** The license information for the containerapp. +arbitrary_data | `string` | **Optional** Arbitrary `key: value` pair(s) of metadata. May contain nested objects. + +##### Metadata Object Example: + +```yaml +name: myapp +appversion: 1.0.0 +description: description of myapp +foo: bar +othermetadata: + foo: bar + files: file://path/to/local/file +``` + +```js +{ + "name": "myapp", + "appversion": "1.0.0", + "description": "description of myapp", + "foo": "bar", + "othermetadata": { + "foo": "bar", + "files": "file://path/to/local/file" + } +} +``` + +#### License Object + +License information for the Container Application. + +##### Fields + +Field Name | Type | Description +---|:---:|--- +name | `string` | **Required.** The human readable license name used for the Container Application, no format imposed.
+url | `string` | **Optional** A URL to the license used for the API. MUST be in the format of a URL. + +##### License Object Example: + + +```yaml +name: Apache 2.0 +url: http://www.apache.org/licenses/LICENSE-2.0.html +``` +```js +{ + "name": "GNU GPL, Version 3", + "url": "https://www.gnu.org/copyleft/gpl.html" +} +``` + + +#### Graph Object + +The graph is a list of items (containerapps) the Container Application depends on. + +##### Fields of a Graph Item Object + +Field Name | Type | Description +---|:---:|--- +name | `string` | **Required.** The name of the depending Container Application. +source | `URL` | **Optional** Source location of the Container Application; the source MUST be specified by a valid URL. If source is present, all other fields SHALL be ignored. +params | [ [ParamsObject](#paramsObject) ] | **Optional** A list of [ParamsObject](#paramsObject) that contain provider specific information. If params is present, the source field SHALL be ignored. +artifacts | [ [ArtifactsObject](#artifactsObject) ] | **Optional** A list of [ArtifactsObject](#artifactsObject) that contain provider specific information. If artifacts is present, the source field SHALL be ignored. + +##### Graph Item Object Example: + +```yaml +--- +name: atomicapp-zabbix-mongodb +source: uri://registry.devops.example.com +# if no "artifacts" is specified, then it is an external Atomic App to be pulled +# and installed from the specified source +``` + +```js +{ +"name": "atomicapp-zabbix-mongodb", +"source": "uri://registry.devops.example.com" +} +``` + +#### Parameters Object + +A list of parameters that the containerapp requires, has set defaults for, or needs user input for. + +##### Fields + +Field Name | Type | Description +---|:---:|--- +name | `string` | **Required.** The name of the parameter. +description | `string` | **Required.** A human readable description of the parameter. +constraints | [ConstraintObject](#constraintObject) | **Optional** An optional definition of constraints to the parameter. +default | `string` | **Optional** An optional default value for the parameter. +hidden | `boolean` | **Optional** An optional boolean signifying the parameter should be obscured when displayed. + +##### Parameters Object Example: + +```yaml +name: password +description: mongoDB Admin password +hidden: true +constraints: + - allowed_pattern: "[A-Z0-9]+" + description: Must consist of characters and numbers only. +``` +```js +{ + "name": "password", + "description": "mongoDB Admin password", + "hidden": true, + "constraints": [ + { + "allowed_pattern": "[A-Z0-9]+", + "description": "Must consist of characters and numbers only." + } + ] +} +``` + +#### Constraint Object + +Constraints to the parameter. + +##### Fields + +Field Name | Type | Description +---|:---:|--- +allowed_pattern | `string` | **Required.** A regexp declaring the allowed pattern. +description | `string` | **Required.** A human readable description of the constraint. + + + +#### Requirements Object + +The list of requirements of the Container Application. It may contain [Storage Requirement Objects](#storageRequirementsObject) (for a persistent volume). + + +#### Storage Requirements Object + +This describes a requirement for persistent, read-only or read-write storage that should be available to the containerapp at runtime. The name of this object MUST be `"persistentVolume"`. + +##### Fields of Storage Requirement + +Field Name | Type | Description +---|:---:|--- +name | `string` | **Required.** A name associated with the storage requirement.
+accessMode | `string` | **Required.** May be `"ReadWrite"` or `"ReadOnly"`. +size | `integer` | **Required.** Size of the required storage. + +##### Storage Requirement Example: + +```yaml +--- +- persistentVolume: + name: "var-lib-mongodb-data" + accessMode: "ReadWrite" + size: 4 # GB by default +``` +```js + { + "persistentVolume": { + "name": "var-lib-mongodb-data", + "accessMode": "ReadWrite", + "size": 4 + } + } +``` + + +#### Artifacts Object + +The Artifacts Object describes a list of provider specific artifact items. These artifact items will be used during installation of the containerapp to deploy it to the provider. Each provider key contains a list of artifacts. Each artifact list item is either a `URL` string or a [source control repository object](#repositoryObject). + +* URL: must be a URL string prefixed with a URI scheme such as `http://`, `https://`, `file:` (relative path) or `file://` (absolute path). URI scheme `file:` may reference a single file or a directory path to multiple files. Directories must end with a trailing slash such as `file:relative/path/to/multiple/artifact/files/`. +* [SourceControlRepositoryObject](#repositoryObject) + +##### Artifacts Example: + +```yaml +--- +artifacts: # list of local or remote files or remote repository path to be processed by the provider selected at install-time + kubernetes: + - source: https://github.com/aweiteka/kube-files.git + tag: release-1 + openshift: + - file:relative/path/openshift/artifacts/ + - https://example.com/openshift/strategies.json + - inherit: + - kubernetes +``` +```js +{ + "artifacts": { + "kubernetes": [ + { + "source": "https://github.com/aweiteka/kube-files.git", + "path": "/artifacts/kubernetes/", + "tag": "release-1" + } + ], + "openshift": [ + "file:relative/path/openshift/artifacts/", + "https://example.com/openshift/strategies.json", + { + "inherit": [ + "kubernetes" + ] + } + ] + } +} +``` + +#### Source Control Repository Object + +Source Control Repository Object for artifact sources. + +##### Fields of a Source Control Repository Object + +Field Name | Type | Description +---|:---:|--- +source | `URL` | **Required** Source location of the source control repository. The source MUST be specified by a valid URL. +path | `string` | **Optional** The path to a specific artifact file or directory of artifact files. Default value is "/" which would reference all of the files in the repository. +type | `string` | **Optional** The source control type. Default value is "git". +branch | `string` | **Optional** The source control branch. Default value is "master". +tag | `string` | **Optional** The source control tag. + + +## Directory Layout + +Names of files that must be present are contained in the file `files` in +the root directory of the specification. These filenames support globbing. + +A filesystem layout of a typical app is this: +``` +├── Nulecule +├── Dockerfile +├── PROVIDER_NAME +│   ├── ... +│   └── PROVIDER_FILES +└── README.md +``` + +* `Nulecule`: Container Application definition +* `Dockerfile`: standard packaging for this containerapp +* `PROVIDER_NAME`: directories of provider-specific files referenced in a containerapp definition file + * `PROVIDER_FILES`: provider-specific files necessary for deploying to provider +* `README.md`: information for deploying this application + + +## README.md + +The README.md is the human-readable document. It describes the containerapp in enough detail so an operator can make parameterization and other deployment decisions. + +NOTE: This is optional.
It is possible for some applications to be "self-describing" through well-written descriptions and input validation. + +## Good Practices + +An implementation of the Nulecule Specification should declare what providers it supports. This should be done by adding a Label to the container image, for example by adding a line to the Dockerfile: +``` +LABEL io.projectatomic.nulecule.providers "kubernetes,docker,openshift" +``` + +## Conventions + +A few conventions are used in the context of Container Applications. + +### Parameters for Providers + +Each provider in the [ArtifactsObject](#artifactsObject) of the [GraphObject](#graphObject) may correspond to a containerapp level [ParamsObject](#paramsObject). + +### Version Label + +The Dockerfile must carry a Label declaring the version of the specification that is used: +``` +LABEL io.projectatomic.nulecule.specversion 0.0.2 +``` diff --git a/docs/spec/README.md b/docs/spec/README.md new file mode 100644 index 00000000..7091e415 --- /dev/null +++ b/docs/spec/README.md @@ -0,0 +1,128 @@ +# Composite Container-based Application Specification + +`\ˈnü-li-ˌkyül\` (n.) a made-up word meaning ["the mother of all atomic particles"](http://simpsons.wikia.com/wiki/Made-up_words). + +**Your installer for container-based applications.** Replace your shell script and deployment instructions with some metadata. + +**Change runtime parameters for different environments.** No need to edit files before deployment. Users can choose interactive or unattended deployment. Guide web interface users with parameter metadata to validate user input and provide descriptive help. + +**Bridge between Enterprise IT and PaaS.** With pluggable orchestration providers you can package your application to run on OpenShift, Kubernetes, Docker Compose, Helios, Panamax, Docker Machine, etc. and allow the user to choose the target when deployed. + +**Compose applications from a catalog.** No need to re-package common services. Create composite applications by referencing other Nulecule-compliant apps. For example, adding a well-designed, orchestrated database is simply a reference to another container image. + +## Problem Statement +Currently there is no standard way of defining a multi-container application's configuration without distributing instructions and files to the end-user. Additionally, these files must be managed and distributed via different systems than the containers themselves. + +Containers in the OCI (Open Container Initiative) format derived from Docker offer a new approach for application packaging. OCI enables application-centric aggregate packaging, optimized for deployment into containers. However, most applications will consist of multiple containers, which surfaces two issues: the relationships between containers need to be expressed in order to manage dependencies and orchestrate the deployment (e.g. set up network connections) with consideration of environmental factors, and this application-level meta-data needs to be distributed. OCI itself, however, stops at the individual container. Orchestration tools such as Kubernetes offer a generic description model for multi-container applications; however, they do not define a transport model, nor a standard way to parameterize a generic template. The mindset of most, if not all, current container orchestration systems is to treat the aggregate, multi-container application as state of the cluster rather than as an entity in its own right, and therefore they regress from the portability that OCI introduced.
This means that it's very easy to put an individual service into a Docker-style Registry; however, there is no way to represent a full application at the distribution level - I can create a single MariaDB container, but not a MariaDB/Galera cluster or even a full application such as [Kolab](https://kolab.org/). So what is missing? A standard way to describe and package a multi-container application. + +## What is Nulecule? + +Nulecule defines a pattern and model for packaging complex multi-container applications and services, referencing all their dependencies, including orchestration metadata in a container image for building, deploying, monitoring, and active management. + +The Nulecule specification enables complex applications to be defined, packaged and distributed using standard container technologies. The resulting container includes dependencies, supports multiple orchestration providers, and has the ability to specify resource requirements. The Nulecule specification also supports the aggregation of multiple composite applications. The Nulecule specification is container and orchestration agnostic, enabling the use of any container and orchestration technology. + +**[Glossary of terms](GLOSSARY.md)** + +## Nulecule Specification Highlights + +* Application description and context maintained in a single container through extensible metadata +* Composable definition of complex applications through inheritance and composition of containers into a single, standards-based, portable description. +* Simplified dependency management for the most complex applications through a directed graph to reflect relationships. +* Container and orchestration engine agnostic, enabling the use of any container technology and/or orchestration technology + +The **Nulecule** file format is explained in detail at [NULECULE_FILE.md](NULECULE_FILE.md). + +## “The Big Picture” + +![Alt Nulecule specification high-level story.](/docs//images/logo.png "Nulecule specification high-level story") + +## Deployment User Experience + +The Nulecule specification has been implemented in the [Atomic App reference implementation](https://github.com/projectatomic/atomicapp). Atomic App currently supports Docker containers and the Kubernetes and Docker orchestration providers. The [atomic command](https://github.com/projectatomic/atomic) is used to run the container that contains the Nulecule specification and the Atomic App implementation. + +This example is a single container application based on the centos/httpd image, but you can use your own. + +You may wish to run the Nulecule from an empty directory as it will copy the Nulecule files to the working directory for inspection every time it is run. + +### Option 1: Non-interactive defaults + +Run the image. It will automatically use kubernetes as the orchestration provider. This will become interactive and prompt for values if the Nulecule file doesn't provide defaults for all of the parameters. + +``` +[sudo] atomic run projectatomic/helloapache +``` + +### Option 2: Unattended + +1. Create the file `answers.conf` with these contents: + + This sets up the values for the two configurable parameters (image and hostport) and indicates that kubernetes should be the orchestration provider. + + [general] + provider = kubernetes + + [helloapache-app] + image = centos/httpd # optional: choose a different image + hostport = 80 # optional: choose a different port to expose +1.
Run the application from the current working directory + + $ [sudo] atomic run projectatomic/helloapache + ... + helloapache + + +1. As an additional experiment, remove the kubernetes pod and change the provider to 'docker' and re-run the application to see it get deployed on native docker. + +### Option 3: Install and Run + +You may want to download the application, review the configuration and parameters as specified in the Nulecule file, and edit the answerfile before running the application. + +1. Download the application files using `atomic install` + + [sudo] atomic install projectatomic/helloapache + +1. Rename `answers.conf.sample` + + mv answers.conf.sample answers.conf + +1. Edit `answers.conf`, review files if desired and then run + + $ [sudo] atomic run projectatomic/helloapache + ... + helloapache + +## Test +Any of these approaches should create a kubernetes pod or a running docker container. + +With a kubernetes pod, once its state is "Running", curl the minion it's running on. + +``` +$ kubectl get pod helloapache +POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS +helloapache 172.17.0.8 helloapache centos/httpd 10.3.9.216/ name=helloapache Running +$ curl 10.3.9.216 + +``` + +If you test the docker provider, once the container is running, curl the port on your localhost. + +``` +$ curl localhost + +``` + +Additional examples that conform to the Nulecule spec can be found at [github.com/projectatomic/nulecule-library](https://github.com/projectatomic/nulecule-library). + +## Developer User Experience + +See the [Getting Started with Nulecule guide](GETTING_STARTED.md). + +## Implementations + +This is only a specification. Implementations may be written in any language. See the [implementation guide](IMPLEMENTATION_GUIDE.md). + +**Reference implementation** https://github.com/projectatomic/atomicapp + +## Examples / Library + +For a library of examples conforming to the current reference implementation [atomicapp](https://github.com/projectatomic/atomicapp) please visit [github.com/projectatomic/nulecule-library](https://github.com/projectatomic/nulecule-library) diff --git a/docs/spec/json/constraint.json b/docs/spec/json/constraint.json new file mode 100644 index 00000000..6d7f7478 --- /dev/null +++ b/docs/spec/json/constraint.json @@ -0,0 +1,23 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Constraint", + "description": "Constraint to the parameter.", + "type": "array", + "items": { + "type": "object", + "required": [ "allowed_pattern", "description" ], + "properties": { + "allowed_pattern": { + "description": "A regular expression pattern.", + "type": "string", + "default": "null" + }, + "description": { + "description": "A human readable description of the constraint.", + "type": "string", + "default": "null" + } + } + } +} diff --git a/docs/spec/json/files b/docs/spec/json/files new file mode 100644 index 00000000..9c3dd392 --- /dev/null +++ b/docs/spec/json/files @@ -0,0 +1,2 @@ +Dockerfile +Nulecule diff --git a/docs/spec/json/graph.json b/docs/spec/json/graph.json new file mode 100644 index 00000000..a53949a8 --- /dev/null +++ b/docs/spec/json/graph.json @@ -0,0 +1,44 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Graph", + "description": "A list of components that constitute this Container Application. Components are either container-based services or other Container Applications.
Components that are container-based services are specified as a collection of artifacts for providers that can accept parameters specified by the deployer. These artifacts are references to files located in local sub-directories. Components that are other Container Applications are specified as URLs.", + "type": "array", + "items" : { + "$ref": "#/definitions/component" + }, + + "definitions": { + "component": { + "description": "ID of a component", + "type": "object", + "required": [ "name" ], + "properties": { + "name": { + "description": "The name of the component.", + "type": "string", + "default": "null" + }, + "source": { + "description": "If the component is another Container Application, source MUST be a valid URL to the source location. If source is present, all other fields SHALL be ignored.", + "type": "string", + "default": "null" + }, + "params": { + "description": "A list of ParamsObject that contain information to be used by providers in conjunction with their ArtifactsObject. If params is present, the source field SHALL be ignored.", + "type": "array", + "items": { + "$ref": "file:param.json" + } + }, + "artifacts": { + "description": "A list of ArtifactsObject that contain provider specific information. If artifacts is present, the source field SHALL be ignored.", + "type": "object", + "additionalProperties": { + "$ref": "file:provider.json" + } + } + } + } + } +} diff --git a/docs/spec/json/license.json b/docs/spec/json/license.json new file mode 100644 index 00000000..1fae538e --- /dev/null +++ b/docs/spec/json/license.json @@ -0,0 +1,20 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "License", + "description": "License information for the Container Application.", + "type": "object", + "required": [ "name" ], + "properties": { + "name": { + "description": "The human readable license name used for the Container Application, no format imposed.", + "type": "string", + "default": "null" + }, + "url": { + "description": "A URL to the license used for the API. MUST be in the format of a URL.", + "type": "string", + "default": "null" + } + } +} diff --git a/docs/spec/json/metadata.json b/docs/spec/json/metadata.json new file mode 100644 index 00000000..30c3388c --- /dev/null +++ b/docs/spec/json/metadata.json @@ -0,0 +1,27 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Metadata", + "description": "An object holding optional metadata related to the Container Application. This will typically include the container application name, version, description, license information and other human readable information.", + "type": "object", + "properties": { + "name": { + "description": "A human readable name of the containerapp.", + "type": "string", + "default": "null" + }, + "appversion": { + "description": "The semantic version string of the Container Application.", + "type": "string", + "default": "null" + }, + "description": { + "description": "A human readable description of the Container Application.
This may contain information for the deployer of the containerapp.", + "type": "string", + "default": "null" + }, + "license": { + "$ref": "file:license.json" + } + } +} diff --git a/docs/spec/json/param.json b/docs/spec/json/param.json new file mode 100644 index 00000000..18f9811b --- /dev/null +++ b/docs/spec/json/param.json @@ -0,0 +1,33 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Parameter", + "description": "Name of the parameter as used in artifacts", + "type": "object", + "required": [ "name", "description" ], + "properties": { + "name": { + "description": "The name of the parameter.", + "type": "string", + "default": "null" + }, + "description": { + "description": "A human readable description of the parameter.", + "type": "string", + "default": "null" + }, + "constraints": { + "$ref": "file:constraint.json" + }, + "default": { + "description": "An optional default value for the parameter.", + "type": "string", + "default": "null" + }, + "hidden": { + "description": "An optional boolean signifying the parameter should be obscured when displayed.", + "type": "boolean", + "default": false + } + } +} diff --git a/docs/spec/json/provider.json b/docs/spec/json/provider.json new file mode 100644 index 00000000..99399758 --- /dev/null +++ b/docs/spec/json/provider.json @@ -0,0 +1,63 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Provider", + "description": "A provider is a deployment platform or orchestrator.", + "type": "array", + "items": { + "oneOf": [ { "$ref": "#/definitions/path" }, { "$ref": "#/definitions/repository" }, { "$ref": "#/definitions/inheritance" } ] + }, + + "definitions": { + "path": { + "description": "Path to the artifact", + "type": "string", + "default": "null" + }, + "repository": { + "type": "object", + "properties": { + "source": { + "name": "source", + "description": "Source location of the source control repository. The source MUST be specified by a valid URL.", + "type": "string", + "default": "null" + }, + "path": { + "name": "path", + "description": "The path to a specific artifact file or directory of artifact files. Default value is '/' which would reference all of the files in the repository.", + "type": "string", + "default": "/" + }, + "type": { + "name": "type", + "description": "The source control type. Default value is 'git'.", + "type": "string", + "default": "git" + }, + "branch": { + "name": "branch", + "description": "The source control branch.
Default value is 'master'.", + "type": "string", + "default": "master" + }, + "tag": { + "name": "tag", + "description": "The source control tag.", + "type": "string", + "default": "null" + } + } + }, + "inheritance": { + "type": "object", + "properties": { + "inherit": { + "name": "inherit", + "description": "List of components whose artifacts will be added to the list of artifacts for the provider.", + "type": "array" + } + } + } + } +} diff --git a/docs/spec/json/requirement.json b/docs/spec/json/requirement.json new file mode 100644 index 00000000..fa97fe32 --- /dev/null +++ b/docs/spec/json/requirement.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Requirements", + "description": "Requirement objects", + "type": "array", + "items": { + "oneOf": [ + { "$ref": "file:requirements/persistentvolume.json" } + ] + } +} diff --git a/docs/spec/json/requirements/persistentvolume.json b/docs/spec/json/requirements/persistentvolume.json new file mode 100644 index 00000000..70b65a17 --- /dev/null +++ b/docs/spec/json/requirements/persistentvolume.json @@ -0,0 +1,34 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "PersistentVolume", + "description": "This describes a requirement for persistent, read-only or read-write storage that should be available to the Container Application at runtime. The name of this object MUST be 'persistentVolume'", + "type": "object", + "properties": { + "persistentVolume": { + "type": "object", + "required": [ "name", "accessMode", "size" ], + "properties": { + "name": { + "description": "A name associated with the storage requirement.", + "type": "string", + "default": "null" + }, + "accessMode": { + "description": "The access mode, read-write or read-only, for the storage", + "type": "string", + "enum": [ + "ReadWrite", + "ReadOnly" + ] + }, + "size": { + "description": "Size of the storage.", + "type": "number", + "minimum": 0 + } + } + } + }, + "additionalProperties": false +} diff --git a/docs/spec/json/schema.json b/docs/spec/json/schema.json new file mode 100644 index 00000000..e7e2d56e --- /dev/null +++ b/docs/spec/json/schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Schema", + "version": "0.0.2", + "description": "The Container Application specification defines a set of configuration files that describe a Container Application. A Container Application is composed of a set of container-based services and/or other Container Applications that together provide an application. These configuration files can be used by tools to deploy the application in an automated way or with customizations as specified by the deployer. Developers tools can generate most of the required files and utilities, such as testing tools, can take advantage of these files.", + "required": [ "id", "specversion", "graph" ], + "properties": { + "id": { + "description": "The machine readable id of the Container Application.", + "type": "string", + "default": "null" + }, + "specversion": { + "description": "The semantic version string of the Container Application Specification used to describe the app. 
The value SHOULD be '0.0.2'.", + "type": "string", + "default": "0.0.2" + }, + "metadata": { + "$ref": "file:metadata.json" + }, + "params": { + "description": "A list of ParamsObject that contain information in the global context of the application, accessible to its child graph items.", + "type": "array", + "items": { + "$ref": "file:param.json" + } + }, + "graph": { + "$ref": "file:graph.json" + }, + "requirements": { + "$ref": "file:requirement.json" + } + } +} diff --git a/docs/spec_coverage.md b/docs/spec_coverage.md new file mode 100644 index 00000000..afe4ac2a --- /dev/null +++ b/docs/spec_coverage.md @@ -0,0 +1,10 @@ +# Implementation + +This table lists the current functionality implemented by Atomic App against the Nulecule [spec](https://github.com/projectatomic/nulecule). + +| Status | Description | Version implemented in | Notes | +|--------|-------------|------------------------|-------| +| Completed | Persistent storage | 0.3.1 | Current functionality is only for the Kubernetes provider | +| Incomplete | ssh/sftp/ftp support | - | - | +| Completed | Docker image support and extraction | 0.1.1 | - | +| Completed | XPathing | 0.2.1 | Support for JSON pointers for artifacts | diff --git a/docs/start_guide.md b/docs/start_guide.md new file mode 100644 index 00000000..2b01c6d0 --- /dev/null +++ b/docs/start_guide.md @@ -0,0 +1,271 @@ +# Getting Started + +This is a thorough getting-started guide that shows you each detail of an Atomic App, teaching you the basic commands as well as how to generate your first Atomic App. + +## Basic commands + +The __four__ basic commands of atomicapp are: + +__atomicapp fetch__: Retrieves a packaged container and exports it to a directory. + +ex. `atomicapp fetch projectatomic/helloapache` + +__atomicapp run__: Runs a packaged container on a specified provider. Unless a directory is specified, `run` will also perform `fetch`. + +ex. `atomicapp run projectatomic/helloapache --provider=kubernetes` + +__atomicapp stop__: Stops a deployed Nulecule on a specified provider. Whether you're using Kubernetes, OpenShift or Docker, Atomic App will stop the containers. + +ex. `atomicapp stop ./myappdir --provider=kubernetes` + +__atomicapp genanswers__: Examines the `Nulecule` file and generates an `answers.conf` file to be used for non-interactive deployment. + +ex. `atomicapp genanswers ./myappdir` + +For more detailed information as well as a list of all parameters, use `atomicapp --help` on the command line. Alternatively, you can read our [CLI doc](/docs/cli.md). + +## Atomic App on Project Atomic hosts + +If you are on a [Project Atomic host](https://projectatomic.io/download) you can interact with `atomicapp` via the `atomic` cli command. + +Some commands for `atomicapp` on an Atomic host are a bit different. + +However, a `--mode` flag can be passed to `atomic run` to change the functionality of the command. + +| Atomic App | Atomic CLI +|-----------|--------| +| `atomicapp fetch projectatomic/helloapache` | `atomic run projectatomic/helloapache --mode fetch` +| `atomicapp run projectatomic/helloapache` | `atomic run projectatomic/helloapache` +| `atomicapp stop ./myappdir` | `atomic stop projectatomic/helloapache ./myappdir` +| `atomicapp genanswers ./myappdir` | `atomic run projectatomic/helloapache ./myappdir --mode genanswers` + +## Building your first Atomic App + +A typical Atomic App or "Nulecule" container consists of the following files: +```sh +~/helloapache +▶ tree +.
+├── answers.conf.sample +├── artifacts +│   ├── docker +│   │   └── hello-apache-pod_run +│   ├── kubernetes +│   │   └── hello-apache-pod.json +│   └── marathon +│   └── helloapache.json +├── Dockerfile +├── Nulecule +└── README.md +``` + +We will go through each file and folder as we build our first Atomic App container. + +For this example, we will be using the [helloapache](https://github.com/projectatomic/nulecule-library/tree/master/helloapache) example from the [nulecule-library](https://github.com/projectatomic/nulecule-library) repo. + +In order to follow along, fetch the container and `cd` into the directory: +```sh +atomicapp fetch --destination localdir projectatomic/helloapache +cd localdir +``` + +### ./localdir/Dockerfile +Atomic App itself is packaged as a container. End-users typically do not install the software from source, instead using the `atomicapp` container as the `FROM` line in a Dockerfile and packaging their application on top. For example: + + +```Dockerfile +FROM projectatomic/atomicapp + +MAINTAINER Your Name + +ADD /Nulecule /Dockerfile README.md /application-entity/ +ADD /artifacts /application-entity/artifacts +``` + +Within `helloapache` we specify a bit more detail in our labels: +```Dockerfile +FROM projectatomic/atomicapp:0.4.2 + +MAINTAINER Red Hat, Inc. + +LABEL io.projectatomic.nulecule.providers="kubernetes,docker,marathon" \ + io.projectatomic.nulecule.specversion="0.0.2" + +ADD /Nulecule /Dockerfile README.md /application-entity/ +ADD /artifacts /application-entity/artifacts +``` + +__Optionally__, you may indicate what providers you specifically support via the Docker LABEL command. + +__NOTE:__ The Dockerfile you supply here is for building a Nuleculized container image (often called an 'Atomic App'). It is not the Dockerfile you use to build your upstream Docker image. The actual `atomicapp` code should already be built at this time and imported in the `FROM projectatomic/atomicapp` line. + +### ./localdir/Nulecule + +This is the `Nulecule` file for Atomic App. The `Nulecule` file is composed of a graph and metadata in order to link one or more containers for your application. +```yaml +--- +specversion: 0.0.2 +id: helloapache-app + +metadata: + name: Hello Apache App + appversion: 0.0.1 + description: Atomic app for deploying a really basic Apache HTTP server + +graph: + - name: helloapache-app + + params: + - name: image + description: The webserver image + default: centos/httpd + - name: hostport + description: The host TCP port as the external endpoint + default: 80 + + artifacts: + docker: + - file://artifacts/docker/hello-apache-pod_run + kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + marathon: + - file://artifacts/marathon/helloapache.json +``` + +##### Spec and id information +```yaml +--- +specversion: 0.0.2 +id: helloapache-app +``` + +Here we indicate the specversion of our Atomic App (similar to a `v1` or `v2` of an API) as well as our ID. + +##### Metadata +```yaml +metadata: + name: Hello Apache App + appversion: 0.0.1 + description: Atomic app for deploying a really basic Apache HTTP server +``` + +__Optionally__, a good metadata section will indicate to a user of your app what it does as well as what version it's on.
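+ +Beyond name, appversion and description, the spec's Metadata Object (described earlier in this document) also allows a license block and arbitrary nested `key: value` metadata. A sketch, using the Apache 2.0 license entry from the spec as a hypothetical example: + +```yaml +metadata: + name: Hello Apache App + appversion: 0.0.1 + description: Atomic app for deploying a really basic Apache HTTP server + license: + name: Apache 2.0 # human readable license name + url: http://www.apache.org/licenses/LICENSE-2.0.html +```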
+ +##### Graph + +```yaml +graph: + - name: helloapache-app + + params: + - name: image + description: The webserver image + default: centos/httpd + - name: hostport + description: The host TCP port as the external endpoint + default: 80 + + artifacts: + docker: + - file://artifacts/docker/hello-apache-pod_run + kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + marathon: + - file://artifacts/marathon/helloapache.json +``` + +__Graph__ is the most important section. In it we indicate all the default parameters as well as all associated artifacts. + +```yaml +params: + - name: image + description: The webserver image + default: centos/httpd +``` +There will likely be many parameters that need to be exposed at deployment. It's best to provide defaults whenever possible. Variable templating is used within artifact files. For example: `$image` within `artifacts/kubernetes/hello-apache-pod.json` becomes `centos/httpd`. + +**NOTE:** Not providing a default value will cause Atomic App to prompt the user for one. Alternatively, an `answers.conf` file can be provided. + +```yaml +artifacts: + docker: + - file://artifacts/docker/hello-apache-pod_run + kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + marathon: + - file://artifacts/marathon/helloapache.json +``` +In order to use a particular provider, a name as well as a file location are required. Each file is a variable-substituted template describing how your Atomic App container is run. We go into more detail below. + +```yaml +kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + - file://artifacts/kubernetes/hello-apache-service.json +``` +Multiple files may also be specified. For example, specifying a pod, service and replication controller for the `kubernetes` provider. + +### ./localdir/artifacts/docker/hello-apache-pod_run +```sh +docker run -d -p $hostport:80 $image +``` +Each artifact uses variable replacement values. For our Docker provider, we substitute the port number with `$hostport` as indicated by our graph in our `Nulecule` file. The same goes for our `$image` variable. + +### ./localdir/artifacts/kubernetes/hello-apache-pod.json +```json +"image": "$image", +"name": "helloapache", +"ports": [ + { + "containerPort": 80, + "hostPort": $hostport, + "protocol": "TCP" + } +``` + +Similarly, the kubernetes provider uses both `$image` and `$hostport` variables for pod deployment. + +### ./localdir/answers.conf.sample + +`answers.conf.sample` is an answers file generated during fetching. It is an ini file that provides parameter answers for non-interactive deployments. + +```ini +[helloapache-app] +image = centos/httpd +hostport = 80 + +[general] +namespace = default +provider = kubernetes +``` + +Default values such as the provider as well as the namespace can be provided. + +In order to use an answers file, simply specify the location of the file when deploying: +```sh +cp answers.conf.sample answers.conf +sudo atomicapp run -a answers.conf . +``` + +### Conclusion + +Now you know how to build your very own first app! After you have created the necessary files, go ahead and build/run it! + +```sh +docker build -t myapp . +sudo atomicapp run myapp +``` + +Atomic App is portable, hence you can also deploy regardless of the host: +```sh +# Host 1 +docker build -t myrepo/myapp . +docker push myrepo/myapp + +# Host 2 +docker pull myrepo/myapp +sudo atomicapp run myrepo/myapp +``` + +We have yet to cover every `atomicapp` command.
Feel free to use `atomicapp [run/fetch/stop] --help` for a list of all options. + +For an extended guide on the `Nulecule` file, read our [extended Nulecule doc](nulecule.md). diff --git a/requirements.txt b/requirements.txt index e5cbfc5f..b39d04b6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ anymarkup -lockfile jsonpointer requests websocket-client diff --git a/script/binary.sh b/script/binary.sh new file mode 100755 index 00000000..549680ce --- /dev/null +++ b/script/binary.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -ex +pip install -r requirements.txt +pip install pyinstaller + +# Due to the way that we dynamically load providers via import_module +# in atomicapp/plugin.py, we have to explicitly specify the modules +# so pyinstaller can "see" them. +pyinstaller atomicapp.spec + +mkdir -p bin +mv dist/main bin/atomicapp +echo "Binary created at bin/atomicapp" diff --git a/script/release.sh b/script/release.sh new file mode 100755 index 00000000..beee7ffa --- /dev/null +++ b/script/release.sh @@ -0,0 +1,326 @@ +#!/bin/bash +UPSTREAM_REPO="projectatomic" +CLI="atomicapp" +LIBRARY="nulecule-library" + + +usage() { + echo "This will prepare Atomic App for release!" + echo "" + echo "Requirements:" + echo " git" + echo " gpg - with a valid GPG key already generated" + echo " hub" + echo " github-release" + echo " GITHUB_TOKEN in your env variable" + echo " " + echo "Not only that, but you must have permission for:" + echo " Tagging releases for Atomic App on Github" + echo " Access to hub.docker.com builds" + echo "" +} + +requirements() { + if [ ! -f /usr/bin/git ] && [ ! -f /usr/local/bin/git ]; then + echo "No git. What's wrong with you?" + return 1 + fi + + if [ ! -f /usr/bin/gpg ] && [ ! -f /usr/local/bin/gpg ]; then + echo "No gpg. What's wrong with you?" + return 1 + fi + + if [ ! -f $GOPATH/bin/github-release ]; then + echo "No $GOPATH/bin/github-release. Please run 'go get -v github.com/aktau/github-release'" + return 1 + fi + + if [ ! -f /usr/bin/hub ]; then + echo "No hub. Please install hub @ github.com/github/hub" + return 1 + fi + + if [[ -z "$GITHUB_TOKEN" ]]; then + echo "export GITHUB_TOKEN=yourtoken needed for using github-release" + fi +} + +# Clone and then change to user's upstream repo for pushing to master / opening PR's :) +clone() { + git clone ssh://git@github.com/$UPSTREAM_REPO/$CLI.git + if [ $? -eq 0 ]; then + echo OK + else + echo FAIL + exit + fi + cd $CLI + git remote remove origin + git remote add origin git@github.com:$ORIGIN_REPO/$CLI.git + git checkout -b release-$1 + cd .. +} + +replaceversion() { + cd $CLI + OLD_VERSION=`python setup.py --version` + echo "OLD VERSION:" $OLD_VERSION + + echo "1. Replaced Dockerfile versioning" + find . -name 'Dockerfile*' -type f -exec sed -i "s/$OLD_VERSION/$1/g" {} \; + + echo "2. Replaced .py versioning" + find . -name '*.py' -type f -exec sed -i "s/$OLD_VERSION/$1/g" {} \; + + echo "3. Replaced docs versioning" + find docs/ -name '*.md' -type f -exec sed -i "s/$OLD_VERSION/$1/g" {} \; + + echo "4. Replaced README.md versioning" + sed -i "s/$OLD_VERSION/$1/g" README.md + + cd .. +} + +changelog() { + cd $CLI + echo "Getting commit changes. Writing to ../changes.txt" + LOG=`git shortlog --email --no-merges --pretty=%s ${1}..` + echo -e "\`\`\`\n$LOG\n\`\`\`" > ../changes.txt + echo "Changelog has been written to changes.txt" + echo "!!PLEASE REVIEW BEFORE CONTINUING!!"
+ echo "Open changes.txt and add the release information" + echo "to the beginning of the file before the git shortlog" + cd .. +} + +changelog_md() { + echo "Generating CHANGELOG.md" + CHANGES=$(cat changes.txt) + cd $CLI + DATE=$(date +"%m-%d-%Y") + CHANGELOG=$(cat CHANGELOG.md) + HEADER="## Atomic App $1 ($DATE)" + echo -e "$HEADER\n\n$CHANGES\n\n$CHANGELOG" >CHANGELOG.md + echo "Changes have been written to CHANGELOG.md" + cd .. +} + +git_commit() { + cd $CLI + + BRANCH=`git symbolic-ref --short HEAD` + if [ -z "$BRANCH" ]; then + echo "Unable to get branch name, is this even a git repo?" + return 1 + fi + echo "Branch: " $BRANCH + + git add . + git commit -m "$1 Release" + git push origin $BRANCH + hub pull-request -b $UPSTREAM_REPO/$CLI:master -h $ORIGIN_REPO/$CLI:$BRANCH + + cd .. + echo "" + echo "PR opened against master" + echo "" +} + +sign() { + # Tarball it! + cp -r $CLI $CLI-$1 + sudo rm -rf $CLI-$1/.git* + sudo tar czf $CLI-$1.tar.gz $CLI-$1 + if [ $? -eq 0 ]; then + echo TARBALL OK + else + echo TARBALL FAIL + exit + fi + + # Sign it! + echo -e "SIGN THE TARBALL!\n" + gpg --detach-sign --armor $CLI-$1.tar.gz + if [ $? -eq 0 ]; then + echo SIGN OK + else + echo SIGN FAIL + exit + fi + + echo "" + echo "The tar.gz. is now located at $CLI-$1.tar.gz" + echo "and the signed one at $CLI-$1.tar.gz.asc" + echo "" +} + +push() { + CHANGES=$(cat changes.txt) + # Release it! + github-release release \ + --user $UPSTREAM_REPO \ + --repo $CLI \ + --tag $1 \ + --name "$1" \ + --description "$CHANGES" + if [ $? -eq 0 ]; then + echo RELEASE UPLOAD OK + else + echo RELEASE UPLOAD FAIL + exit + fi + + github-release upload \ + --user $UPSTREAM_REPO \ + --repo $CLI \ + --tag $1 \ + --name "$CLI-$1.tar.gz" \ + --file $CLI-$1.tar.gz + if [ $? -eq 0 ]; then + echo TARBALL UPLOAD OK + else + echo TARBALL UPLOAD FAIL + exit + fi + + github-release upload \ + --user $UPSTREAM_REPO \ + --repo $CLI\ + --tag $1 \ + --name "$CLI-$1.tar.gz.asc" \ + --file $CLI-$1.tar.gz.asc + if [ $? -eq 0 ]; then + echo SIGNED TARBALL UPLOAD OK + else + echo SIGNED TARBALL UPLOAD FAIL + exit + fi + + echo "DONE" + echo "DOUBLE CHECK IT:" + echo "!!!" + echo "https://github.com/$UPSTREAM_REPO/$CLI/releases/edit/$1" + echo "!!!" + echo "REMEMBER TO UPDATE DOCKER BUILDS! :D" +} + +update_library() { + BRANCH=sync-with-$1 + rm -rf $LIBRARY + + # Clone + git clone ssh://git@github.com/$UPSTREAM_REPO/$LIBRARY.git + if [ $? -eq 0 ]; then + echo OK + else + echo FAIL + exit + fi + cd $LIBRARY + git remote remove origin + git remote add origin git@github.com:$ORIGIN_REPO/$LIBRARY.git + git checkout -b $BRANCH + + # Commit + find . -type f -iname 'Dockerfile' -exec sed -i "s,FROM projectatomic/atomicapp:[0-9].[0-9].[0-9],FROM projectatomic/atomicapp:$1," "{}" +; + git add . + git commit -m "Sync with $1 release" + git push origin $BRANCH + hub pull-request -b $UPSTREAM_REPO/$LIBRARY:master -h $ORIGIN_REPO/$LIBRARY:$BRANCH + cd .. +} + +clean() { + rm -rf $CLI $CLI-$1 $CLI-$1.tar.gz $CLI-$1.tar.gz.asc $LIBRARY changes.txt +} + +main() { + local cmd=$1 + usage + + echo "What is your Github username? (location of your atomicapp fork)" + read ORIGIN_REPO + echo "You entered: $ORIGIN_REPO" + echo "" + + echo "" + echo "First, please enter the version of the NEW release: " + read VERSION + echo "You entered: $VERSION" + echo "" + + echo "" + echo "Second, please enter the version of the LAST release: " + read PREV_VERSION + echo "You entered: $PREV_VERSION" + echo "" + + clear + + echo "Now! 
It's time to go through each step of releasing Atomic App!" + echo "If one of these steps fails / does not work, simply re-run ./release.sh" + echo "Re-enter the information at the beginning and continue on the failed step" + echo "" + + PS3='Please enter your choice: ' + options=( + "Git clone master" + "Replace version number" + "Generate changelog" + "Generate changelog for release" + "Create PR against atomicapp" + "!!! Before continuing, make sure the Atomic App release PR has been merged !!!" + "Update and create PR against nulecule-library" + "Tarball and sign atomicapp - requires gpg key" + "Upload the tarball and push to Github release page" + "!!! Build the new atomicapp docker image on hub.docker.com with the tagged release and then merge the nulecule-library PR !!!" + "Clean" + "Quit") + select opt in "${options[@]}" + do + echo "" + case $opt in + "Git clone master") + clone $VERSION + ;; + "Replace version number") + replaceversion $VERSION + ;; + "Generate changelog") + changelog $PREV_VERSION + ;; + "Generate changelog for release") + changelog_md $VERSION + ;; + "Create PR against atomicapp") + git_commit $VERSION + ;; + "Update and create PR against nulecule-library") + update_library $VERSION + ;; + "Tarball and sign atomicapp - requires gpg key") + sign $VERSION + ;; + "Upload the tarball and push to Github release page") + push $VERSION + ;; + "Clean") + clean $VERSION + ;; + "Quit") + clear + break + ;; + *) echo invalid option;; + esac + echo "" + done +} + +main "$@" +echo "If you're done, make sure you have done the following:" +echo " Triggered the hub.docker.com build for the new atomicapp version" +echo " Merged the nulecule-library PR so the new containers have been created" +echo " Uploaded the new release to download.projectatomic.io and edited index.html" +echo "" diff --git a/setup.py b/setup.py index d0014388..ccf55210 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ #!/usr/bin/env python """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.3.0', + version='0.6.4', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', @@ -51,7 +51,10 @@ def _install_requirements(): 'console_scripts': ['atomicapp=atomicapp.cli.main:main'], }, packages=find_packages(), - package_data={'atomicapp': ['providers/external/kubernetes/*.yaml']}, + package_data={'atomicapp': ['providers/external/kubernetes/*.yaml', + 'nulecule/external/templates/nulecule/*.tpl', + 'nulecule/external/templates/nulecule/artifacts/docker/*.tpl', + 'nulecule/external/templates/nulecule/artifacts/kubernetes/*.tpl']}, include_package_data=True, install_requires=_install_requirements() ) diff --git a/test-requirements.txt b/test-requirements.txt index b68bb921..f292f6cb 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,3 +2,4 @@ flake8 mock pep8 pytest-cov +pytest-localserver diff --git a/tests/units/cli/test_cli.py b/tests/units/cli/test_cli.py index 2459b9ca..1b3e62e7 100644 --- a/tests/units/cli/test_cli.py +++ b/tests/units/cli/test_cli.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App.
@@ -71,13 +71,13 @@ def test_run_helloapache_app(self): assert exec_info.value.code == 0 - def test_install_helloapache_app(self): + def test_fetch_helloapache_app(self): command = [ "main.py", "--verbose", "--answers-format=json", "--dry-run", - "install", + "fetch", self.examples_dir + 'helloapache/' ] @@ -119,13 +119,13 @@ def test_run_helloapache_app_marathon(self): assert exec_info.value.code == 0 - def test_install_helloapache_app_docker(self): + def test_fetch_helloapache_app_docker(self): command = [ "main.py", "--verbose", "--answers-format=json", "--dry-run", - "install", + "fetch", self.examples_dir + 'helloapache/' ] diff --git a/tests/units/cli/test_cli_gitlab_example.py b/tests/units/cli/test_cli_gitlab_example.py index e5369335..b0451a30 100644 --- a/tests/units/cli/test_cli_gitlab_example.py +++ b/tests/units/cli/test_cli_gitlab_example.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -76,12 +76,12 @@ def tearDownClass(cls): os.remove(os.path.join(root, f)) # Installs the gitlab example similarly to `test_cli.py` examples - def test_install_gitlab_app(self): + def test_fetch_gitlab_app(self): command = [ "main.py", "--verbose", "--dry-run", - "install", + "fetch", self.examples_dir + 'gitlab/' ] diff --git a/tests/units/cli/test_default_provider.py b/tests/units/cli/test_default_provider.py index 7a9aeea3..c5e56ec7 100644 --- a/tests/units/cli/test_default_provider.py +++ b/tests/units/cli/test_default_provider.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. @@ -61,16 +61,16 @@ def test_run_helloapache_app(self, capsys): # Run the dry-run command with pytest.raises(SystemExit) as exec_info: self.exec_cli(command) - nil, out = capsys.readouterr() + stdout, stderr = capsys.readouterr() # Tear down and remove all those useless generated files self.tear_down() # Print out what we've captured just in case the test fails - print out + print stdout # Since this a Docker-only provider test, docker *should* be in it, NOT Kubernetes - assert "'provider': 'docker'" in out - assert "Deploying to Kubernetes" not in out + assert "provider: Docker" in stdout + assert "Deploying to Kubernetes" not in stdout assert exec_info.value.code == 0 diff --git a/tests/units/cli/test_examples/gitlab/Dockerfile b/tests/units/cli/test_examples/gitlab/Dockerfile deleted file mode 100644 index 28ef1ded..00000000 --- a/tests/units/cli/test_examples/gitlab/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Navid Shaikh - -LABEL io.projectatomic.nulecule.specversion 0.0.2 -LABEL io.projectatomic.nulecule.providers "kubernetes" - -LABEL Build docker build --rm --tag swordphilic/gitlab-centos7-atomicapp . 
- -ADD /Nulecule /Dockerfile /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/guestbook-go/Dockerfile b/tests/units/cli/test_examples/guestbook-go/Dockerfile deleted file mode 100644 index 70e9f565..00000000 --- a/tests/units/cli/test_examples/guestbook-go/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Jason Brooks - -LABEL io.projectatomic.nulecule.providers="kubernetes" \ - io.projectatomic.nulecule.specversion="0.0.2" - -ADD /Nulecule /Dockerfile README.md /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/guestbook-go/README.md b/tests/units/cli/test_examples/guestbook-go/README.md deleted file mode 100644 index e23813ce..00000000 --- a/tests/units/cli/test_examples/guestbook-go/README.md +++ /dev/null @@ -1,62 +0,0 @@ -This is the [guestbook-go](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/guestbook-go) sample application from the kubernetes project, packaged as an atomic application based on the nulecule specification. - -Kubernetes is currently the only supported provider. You'll need to run this from a workstation that has the atomic CLI and kubectl client that can connect to a kubernetes master. This example depends on kube-dns being configured on your kubernetes cluster. - -### Step 1 - -Build: - -``` -# docker build -t $USER/guestbookgo-atomicapp . -``` - -### Step 2 - -Install and Run: - - -``` -# atomic install $USER/guestbookgo-atomicapp -# atomic run $USER/guestbookgo-atomicapp -``` - -### Step 3 - -Access the guestbook through a random NodePort on your cluster. Find the port by running: - -``` -$ kubectl describe service guestbook | grep NodePort - -NodePort: 31288/TCP -``` - -To find the ip address on your node, run: - -``` -$ kubectl get nodes -NAME LABELS STATUS -kube-node-1 kubernetes.io/hostname=kube-node-1 Ready -``` - -And using the node name from above, run: - -``` -$ kubectl describe nodes kube-node-1 | grep Addresses -Addresses: 192.168.121.174 -``` - -Once the app's container images are pulled and pods are running, you'll be able to reach the guestbook: - -``` -curl 192.168.121.174:31288 - - - - - - - - Guestbook - -... -``` diff --git a/tests/units/cli/test_examples/helloapache/Dockerfile b/tests/units/cli/test_examples/helloapache/Dockerfile deleted file mode 100644 index 6c6ce3d6..00000000 --- a/tests/units/cli/test_examples/helloapache/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Aaron Weitekamp - -LABEL io.projectatomic.nulecule.providers="kubernetes,docker" \ - io.projectatomic.nulecule.specversion="0.0.2" - -ADD /Nulecule /Dockerfile README.md /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/helloapache/README.md b/tests/units/cli/test_examples/helloapache/README.md deleted file mode 100644 index abac3e68..00000000 --- a/tests/units/cli/test_examples/helloapache/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# helloapache - -This is an atomic application based on the nulecule specification. Kubernetes and native docker are currently the only supported providers. You'll need to run this from a workstation that has the atomic command. If you wish to use the kubernetes provider, you will also need a kubectl client that can connect to a kubernetes master. - -It's a single container application based on the centos/httpd image, but you can use your own. 
-
-## Option 1: Non-interactive defaults
-
-Run the image. It will automatically use kubernetes as the orchestration provider.
-```
-[sudo] atomic run projectatomic/helloapache
-```
-
-Note: This option is not interactive because all params in the Nulecule file have default values.
-
-## Option 2: Unattended
-
-1. Create the file `answers.conf` with these contents:
-
-    This sets up the values for the two configurable parameters (image and hostport) and indicates that kubernetes should be the orchestration provider.
-
-        [general]
-        provider = kubernetes
-
-        [helloapache-app]
-        image = centos/httpd  # optional: choose a different image
-        hostport = 80         # optional: choose a different port to expose
-1. Run the application from the current working directory
-
-        $ [sudo] atomic run projectatomic/helloapache
-        ...
-        helloapache
-
-
-1. As an additional experiment, remove the kubernetes pod and change the provider to 'docker' and re-run the application to see it get deployed on base docker.
-
-## Option 3: Install and Run
-
-You may want to download the application, review the configuration and parameters as specified in the Nulecule file, and edit the answers file before running the application.
-
-1. Download the application files using `atomic install`
-
-        [sudo] atomic install projectatomic/helloapache
-
-1. Rename `answers.conf.sample`
-
-        mv answers.conf.sample answers.conf
-
-1. Edit `answers.conf`, review files if desired and then run
-
-        $ [sudo] atomic run projectatomic/helloapache
-        ...
-        helloapache
-
-## Test
-Any of these approaches should create a kubernetes pod or a running docker container.
-
-With a kubernetes pod, once its state is "Running" curl the minion it's running on.
-
-```
-$ kubectl get pod helloapache
-POD          IP          CONTAINER(S)  IMAGE(S)      HOST         LABELS            STATUS
-helloapache  172.17.0.8  helloapache   centos/httpd  10.3.9.216/  name=helloapache  Running
-$ curl 10.3.9.216
-
-```
-
-If you test the docker provider, once the container is running, curl the port on your localhost.
-
-```
-$ curl localhost
-
-```
diff --git a/tests/units/cli/test_examples/kubernetes-atomicapp/Dockerfile b/tests/units/cli/test_examples/kubernetes-atomicapp/Dockerfile
deleted file mode 100644
index 4ed549c3..00000000
--- a/tests/units/cli/test_examples/kubernetes-atomicapp/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM projectatomic/atomicapp:0.1.3
-
-MAINTAINER Jason Brooks
-
-LABEL io.projectatomic.nulecule.specversion 0.0.2
-LABEL io.projectatomic.nulecule.providers = "docker"
-
-ADD /Nulecule /Dockerfile /answers.conf /application-entity/
-ADD /artifacts /application-entity/artifacts
diff --git a/tests/units/cli/test_examples/oneprovider-helloapache/Dockerfile b/tests/units/cli/test_examples/oneprovider-helloapache/Dockerfile
deleted file mode 100644
index 6c6ce3d6..00000000
--- a/tests/units/cli/test_examples/oneprovider-helloapache/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM projectatomic/atomicapp:0.1.10
-
-MAINTAINER Aaron Weitekamp
-
-LABEL io.projectatomic.nulecule.providers="kubernetes,docker" \
-      io.projectatomic.nulecule.specversion="0.0.2"
-
-ADD /Nulecule /Dockerfile README.md /application-entity/
-ADD /artifacts /application-entity/artifacts
diff --git a/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule b/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule
index 75ae781c..98552ce1 100644
--- a/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule
+++ b/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule
@@ -6,6 +6,10 @@ metadata:
   name: Hello Apache App
   appversion: 0.0.1
   description: Atomic app for deploying a really basic Apache HTTP server
+params:
+  - name: provider
+    description: The provider that is used to deploy the application
+    default: docker
 graph:
   - name: helloapache-app
     params:
diff --git a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/Dockerfile b/tests/units/cli/test_examples/wordpress-centos7-atomicapp/Dockerfile
deleted file mode 100644
index 11c7dda8..00000000
--- a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM projectatomic/atomicapp:0.1.10
-
-MAINTAINER Christoph Görn
-
-LABEL io.projectatomic.nulecule.specversion 0.0.1-alpha
-
-LABEL Build docker build --rm --tag goern/wordpress-centos7-atomicapp .
-
-
-ADD /Nulecule /Dockerfile README.asciidoc gpl-3.0.txt /application-entity/
-ADD /artifacts /application-entity/artifacts
diff --git a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/README.asciidoc b/tests/units/cli/test_examples/wordpress-centos7-atomicapp/README.asciidoc
deleted file mode 100644
index 0a912f72..00000000
--- a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/README.asciidoc
+++ /dev/null
@@ -1,12 +0,0 @@
-= Wordpress Atomic App
-
-This is a Wordpress Atomic App; it will reuse MySQL and SkyDNS to
-provide a Kubernetes-based Wordpress to you.
-
-== Usage
-
-`atomic run goern/wordpress-centos7-atomicapp`
-
-= Version
-
-This is version 1.1.0 of Wordpress Atomic App.
diff --git a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/gpl-3.0.txt b/tests/units/cli/test_examples/wordpress-centos7-atomicapp/gpl-3.0.txt
deleted file mode 100644
index 94a9ed02..00000000
--- a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/gpl-3.0.txt
+++ /dev/null
@@ -1,674 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- [... remainder of the standard GNU GPL version 3 text, through its "How to Apply These Terms" appendix, omitted ...]
diff --git a/tests/units/index/test_index.py b/tests/units/index/test_index.py
new file mode 100644
index 00000000..c055d9cf
--- /dev/null
+++ b/tests/units/index/test_index.py
@@ -0,0 +1,50 @@
+"""
+ Copyright 2014-2016 Red Hat, Inc.
+
+ This file is part of Atomic App.
+
+ Atomic App is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Atomic App is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with Atomic App. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import unittest
+import mock
+import os
+import tempfile
+
+from atomicapp.index import Index
+
+
+def mock_index_load_call(self, test):
+    self.index = {'location': '.', 'nulecules': [
+        {'providers': ['docker'], 'id': 'test', 'metadata':{'appversion': '0.0.1', 'location': 'foo'}}]}
+
+
+class TestIndex(unittest.TestCase):
+
+    """
+    Tests the index
+    """
+
+    # Tests listing the index with a patched self.index
+    @mock.patch("atomicapp.index.Index._load_index_file", mock_index_load_call)
+    def test_list(self):
+        a = Index()
+        a.list()
+
+    # Test generation with current test_examples in cli
+    @mock.patch("atomicapp.index.Index._load_index_file", mock_index_load_call)
+    def test_generate(self):
+        self.tmpdir = tempfile.mkdtemp(prefix="atomicapp-generation-test", dir="/tmp")
+        a = Index()
+        a.generate("tests/units/cli/test_examples", os.path.join(self.tmpdir, "index.yaml"))
diff --git a/tests/units/kubeshift/external/example_kubeconfig b/tests/units/kubeshift/external/example_kubeconfig
new file mode 100644
index 00000000..ef955b9c
--- /dev/null
+++ b/tests/units/kubeshift/external/example_kubeconfig
@@ -0,0 +1,17 @@
+apiVersion: v1
+clusters:
+- cluster:
+    server: http://localhost:8080
+  name: dev
+contexts:
+- context:
+    cluster: dev
+    user: default
+  name: dev
+current-context: dev
+kind: Config
+preferences: {}
+users:
+- name: default
+  user:
+    token: foobar
diff --git a/tests/units/kubeshift/test_client.py b/tests/units/kubeshift/test_client.py
new file mode 100644
index 00000000..ad01092b
--- /dev/null
+++ b/tests/units/kubeshift/test_client.py
@@ -0,0 +1,72 @@
+import mock
+import pytest
+from atomicapp.providers.lib.kubeshift.client import Client
+from atomicapp.providers.lib.kubeshift.exceptions import KubeClientError
+
+config = {
+    "kind": "Config",
+    "preferences": {},
+    "current-context": "dev",
+    "contexts": [
+        {
+            "name": "dev",
+            "context": {
+                "cluster": "dev",
+                "user": "default"
+            }
+        }
+    ],
+    "clusters": [
+        {
+            "cluster": {
+                "server": "http://localhost:8080"
+            },
+            "name": "dev"
+        }
+    ],
+    "apiVersion": "v1",
+    "users": [
+        {
+            "name": "default",
+            "user": {
+                "token": "foobar"
+            }
+        }
+    ]
+}
+
+
+class FakeClient():
+
+    def __init__(self, *args):
+        pass
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.client.KubeKubernetesClient")
+def test_client_kubernetes(FakeClient):
+    Client(config, "kubernetes")
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.client.KubeOpenshiftClient")
+def test_client_openshift(FakeClient):
+    Client(config, "openshift")
+
+
+def test_client_load_failure():
+    with pytest.raises(KubeClientError):
+        Client(config, "foobar")
+
+
+# TODO
+def test_client_create():
+    pass
+
+
+# TODO
+def test_client_delete():
+    pass
+
+
+# TODO
+def test_client_namespaces():
+    pass
diff --git a/tests/units/kubeshift/test_kubebase.py b/tests/units/kubeshift/test_kubebase.py
new file mode 100644
index 00000000..1eb0274d
--- /dev/null
+++ b/tests/units/kubeshift/test_kubebase.py
@@ -0,0 +1,93 @@
+import pytest
+from atomicapp.providers.lib.kubeshift.kubebase import KubeBase
+from atomicapp.providers.lib.kubeshift.exceptions import KubeConnectionError
+
+
+config = {
+    "kind": "Config",
+    "preferences": {},
+    "current-context": "dev",
+    "contexts": [
+        {
+            "name": "dev",
+            "context": {
+                "cluster": "dev",
+                "user": "default"
+            }
+        }
+    ],
+    "clusters": [
+        {
+            "cluster": {
+                "server": "http://localhost:8080"
+            },
+            "name": "dev"
+        }
+    ],
+    "apiVersion": "v1",
+    "users": [
+        {
+            "name": "default",
+            "user": {
+                "token": "foobar"
+            }
+        }
+    ]
+}
+kubebase = KubeBase(config)
+
+
+def test_get_resources(httpserver):
+    content = '{"kind":"APIResourceList","groupVersion":"v1","resources":[{"name":"bindings","namespaced":true,"kind":"Binding"},{"name":"componentstatuses","namespaced":false,"kind":"ComponentStatus"}]}'
+    httpserver.serve_content(content, code=200, headers=None)
+    kubebase.get_resources(httpserver.url)
+
+
+def test_get_groups(httpserver):
+    content = '{"kind":"APIGroupList","groups":[{"name":"autoscaling","versions":[{"groupVersion":"autoscaling/v1","version":"v1"}],"preferredVersion":{"groupVersion":"autoscaling/v1","version":"v1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]},{"name":"batch","versions":[{"groupVersion":"batch/v1","version":"v1"}],"preferredVersion":{"groupVersion":"batch/v1","version":"v1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]},{"name":"extensions","versions":[{"groupVersion":"extensions/v1beta1","version":"v1beta1"}],"preferredVersion":{"groupVersion":"extensions/v1beta1","version":"v1beta1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]}]}'
+    httpserver.serve_content(content, code=200, headers=None)
+    kubebase.get_groups(httpserver.url)
+
+
+def test_connection(httpserver):
+    httpserver.serve_content(content="OK", code=200, headers=None)
+    kubebase.test_connection(httpserver.url)
+
+
+def test_kind_to_resource_name():
+    assert kubebase.kind_to_resource_name("Pod") == "pods"
+    assert kubebase.kind_to_resource_name("buildconfig") == "buildconfigs"
+    assert kubebase.kind_to_resource_name("policy") == "policies"
+    assert kubebase.kind_to_resource_name("petset") == "petsets"
+    assert kubebase.kind_to_resource_name("componentstatus") == "componentstatuses"
+    assert kubebase.kind_to_resource_name("Ingress") == "ingresses"
+
+
+def test_request_methods_failures():
+    with pytest.raises(KubeConnectionError):
+        kubebase.request("get", "http://localhost")
+    with pytest.raises(KubeConnectionError):
+        kubebase.request("post", "http://localhost")
+    with pytest.raises(KubeConnectionError):
+        kubebase.request("put", "http://localhost")
+    with pytest.raises(KubeConnectionError):
+        kubebase.request("delete", "http://localhost")
+    with pytest.raises(KubeConnectionError):
+        kubebase.request("patch", "http://localhost")
+
+
+def test_request_timeout(httpserver):
+    httpserver.serve_content(content="Time out", code=408, headers=None)
+    with pytest.raises(KubeConnectionError):
+        kubebase.request("get", httpserver.url)
+
+
+def test_request_ok(httpserver):
+    httpserver.serve_content(content="OK", code=200, headers=None)
+    kubebase.request("get", httpserver.url)
+
+
+def test_websocket_request_without_ssl():
+    # Should get an attribute error if there is no "cert_ca" in the base config
+    with pytest.raises(AttributeError):
+        kubebase.websocket_request("http://foobar")
diff --git a/tests/units/kubeshift/test_kubeconfig.py b/tests/units/kubeshift/test_kubeconfig.py
new file mode 100644
index 00000000..6a89debf
--- /dev/null
+++ b/tests/units/kubeshift/test_kubeconfig.py
@@ -0,0 +1,195 @@
+import unittest
+import pytest
+import tempfile
+import os
+from atomicapp.plugin import ProviderFailedException
+from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig
+
+
+class TestKubeConfParsing(unittest.TestCase):
+
+    def test_from_file(self):
+        """
+        Test parsing a hello world JSON example and returning back the
+        respective anymarkup content
+        """
+        _, tmpfilename = tempfile.mkstemp()
+        f = open(tmpfilename, 'w')
+        f.write("{ 'hello': 'world'}")
+        f.close()
+        KubeConfig.from_file(tmpfilename)
+
+    def test_from_params(self):
+        KubeConfig.from_params("foo", "bar", "foo", "bar")
+
+    def test_parse_kubeconf_from_file_failure(self):
+        _, tmpfilename = tempfile.mkstemp()
+        f = open(tmpfilename, 'w')
+        f.write("{ 'hello': 'world'}")
+        f.close()
+        with pytest.raises(KeyError):
+            KubeConfig.parse_kubeconf(tmpfilename)
+
+    def test_parse_kubeconf_from_file(self):
+        example_kubeconfig = os.path.dirname(__file__) + '/external/example_kubeconfig'
+        KubeConfig.parse_kubeconf(example_kubeconfig)
+
+    def test_parse_kubeconf_data_insecure(self):
+        """
+        Test parsing kubeconf data with current context containing
+        cluster, user, namespace info and skipping tls verification
+        """
+        kubecfg_data = {
+            'current-context': 'context2',
+            'contexts': [
+                {
+                    'name': 'context1',
+                },
+                {
+                    'name': 'context2',
+                    'context': {
+                        'cluster': 'cluster1',
+                        'user': 'user1',
+                        'namespace': 'namespace1'
+                    }
+                }
+            ],
+            'clusters': [
+                {
+                    'name': 'cluster1',
+                    'cluster': {
+                        'insecure-skip-tls-verify': 'true',
+                        'server': 'server1'
+                    }
+                }
+            ],
+            'users': [
+                {
+                    'name': 'user1',
+                    'user': {
+                        'token': 'token1'
+                    }
+                }
+            ]
+        }
+
+        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data),
+                         {'provider-api': 'server1',
+                          'provider-auth': 'token1',
+                          'namespace': 'namespace1',
+                          'provider-tlsverify': False,
+                          'provider-cafile': None})
+
+    def test_parse_kubeconf_data_cafile(self):
+        """
+        Test parsing kubeconf data with current context containing
+        cluster, user, namespace info and certificate-authority
+        """
+        kubecfg_data = {
+            'current-context': 'context2',
+            'contexts': [
+                {
+                    'name': 'context1',
+                },
+                {
+                    'name': 'context2',
+                    'context': {
+                        'cluster': 'cluster1',
+                        'user': 'user1',
+                        'namespace': 'namespace1'
+                    }
+                }
+            ],
+            'clusters': [
+                {
+                    'name': 'cluster1',
+                    'cluster': {
+                        'certificate-authority': '/foo/bar',
+                        'server': 'server1'
+                    }
+                }
+            ],
+            'users': [
+                {
+                    'name': 'user1',
+                    'user': {
+                        'token': 'token1'
+                    }
+                }
+            ]
+        }
+
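+        # The certificate-authority path should surface as provider-cafile,
+        # and provider-tlsverify should come back True for this config.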
+        self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data),
+                         {'provider-api': 'server1',
+                          'provider-auth': 'token1',
+                          'namespace': 'namespace1',
+                          'provider-tlsverify': True,
+                          'provider-cafile': '/foo/bar'})
+
+    def test_parse_kubeconf_data_no_context(self):
+        """
+        Test parsing kubeconf data with missing context data for
+        current context.
+        """
+        kubecfg_data = {
+            'current-context': 'context2',
+            'contexts': [
+                {
+                    'name': 'context1',
+                }
+            ],
+            'clusters': [
+                {
+                    'name': 'cluster1',
+                    'cluster': {
+                        'server': 'server1'
+                    }
+                }
+            ],
+            'users': [
+                {
+                    'name': 'user1',
+                    'user': {
+                        'token': 'token1'
+                    }
+                }
+            ]
+        }
+
+        self.assertRaises(ProviderFailedException,
+                          KubeConfig.parse_kubeconf_data, kubecfg_data)
+
+    def test_parse_kubeconf_data_no_user(self):
+        """
+        Test parsing kubeconf data with missing user data in current
+        context.
+        """
+        kubecfg_data = {
+            'current-context': 'context2',
+            'contexts': [
+                {
+                    'name': 'context1',
+                },
+                {
+                    'name': 'context2',
+                    'context': {
+                        'cluster': 'cluster1',
+                        'user': 'user1',
+                        'namespace': 'namespace1'
+                    }
+                }
+            ],
+            'clusters': [
+                {
+                    'name': 'cluster1',
+                    'cluster': {
+                        'server': 'server1'
+                    }
+                }
+            ],
+            'users': [
+            ]
+        }
+
+        self.assertRaises(ProviderFailedException,
+                          KubeConfig.parse_kubeconf_data, kubecfg_data)
diff --git a/tests/units/kubeshift/test_kubernetes.py b/tests/units/kubeshift/test_kubernetes.py
new file mode 100644
index 00000000..914c31a6
--- /dev/null
+++ b/tests/units/kubeshift/test_kubernetes.py
@@ -0,0 +1,82 @@
+import mock
+from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient
+
+config = {
+    "kind": "Config",
+    "preferences": {},
+    "current-context": "dev",
+    "contexts": [
+        {
+            "name": "dev",
+            "context": {
+                "cluster": "dev",
+                "user": "default"
+            }
+        }
+    ],
+    "clusters": [
+        {
+            "cluster": {
+                "server": "http://localhost:8080"
+            },
+            "name": "dev"
+        }
+    ],
+    "apiVersion": "v1",
+    "users": [
+        {
+            "name": "default",
+            "user": {
+                "token": "foobar"
+            }
+        }
+    ]
+}
+
+
+class FakeClient():
+
+    def __init__(self, *args):
+        pass
+
+    def test_connection(self, *args):
+        pass
+
+    def get_resources(self, *args):
+        return ['Pod', 'pod', 'pods']
+
+    def get_groups(self, *args):
+        return {}
+
+    def request(self, method, url, data=None):
+        return None, 200
+
+    @property
+    def cluster(self):
+        return {'server': 'https://foobar'}
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.kubernetes.KubeBase")
+def test_create(mock_class):
+    # Mock the API class
+    mock_class.return_value = FakeClient()
+    mock_class.kind_to_resource_name.return_value = 'Pod'
+
+    k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+        "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+    a = KubeKubernetesClient(config)
+    a.create(k8s_object, "foobar")
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.kubernetes.KubeBase")
+def test_delete(mock_class):
+    # Mock the API class
+    mock_class.return_value = FakeClient()
+    mock_class.kind_to_resource_name.return_value = 'Pod'
+
+    k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+        "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+    a = KubeKubernetesClient(config)
+    a.delete(k8s_object, "foobar")
diff --git a/tests/units/kubeshift/test_openshift.py b/tests/units/kubeshift/test_openshift.py
new file mode 100644
index 00000000..4ab5e1ad
--- /dev/null
+++ b/tests/units/kubeshift/test_openshift.py
@@ -0,0 +1,141 @@
+import mock
+from atomicapp.providers.lib.kubeshift.openshift import KubeOpenshiftClient
+
+config = {
+    "kind": "Config",
+    "preferences": {},
+    "current-context": "dev",
+    "contexts": [
+        {
+            "name": "dev",
+            "context": {
+                "cluster": "dev",
+                "user": "default"
+            }
+        }
+    ],
+    "clusters": [
+        {
+            "cluster": {
+                "server": "http://localhost:8080"
+            },
+            "name": "dev"
+        }
+    ],
+    "apiVersion": "v1",
+    "users": [
+        {
+            "name": "default",
+            "user": {
+                "token": "foobar"
+            }
+        }
+    ]
+}
+
+
+class FakeClient():
+
+    def __init__(self, *args):
+        pass
+
+    def test_connection(self, *args):
+        pass
+
+    def get_resources(self, *args):
+        return ['Pod', 'template', 'Route']
+
+    def get_groups(self, *args):
+        return {}
+
+    def request(self, method, url, data=None):
+        return None, 200
+
+    @property
+    def cluster(self):
+        return {'server': 'https://foobar'}
+
+
+@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
+def test_k8s_create(mock_class):
+    # Mock the API class
+    mock_class.return_value = FakeClient()
+    mock_class.get_resources.return_value = ['Pod']
+    mock_class.kind_to_resource_name.return_value = 'Pod'
+
+    k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+        "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+    a = KubeOpenshiftClient(config)
+    a.create(k8s_object, "foobar")
+
+@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
+def test_oc_create(mock_class):
+    mock_class.return_value = FakeClient()
+    mock_class.get_resources.return_value = ['Route']
+    mock_class.kind_to_resource_name.return_value = 'Route'
+
+    oc_object = {"apiVersion": "v1", "kind": "Route", "metadata": {"labels": {"name": "helloapache-route"}, "name": "helloapache-route"}, "spec": {
+        "host": "$endpoint", "to": [{"kind": "Service", "name": "helloapache-svc"}]}}
+    a = KubeOpenshiftClient(config)
+    a.create(oc_object, "foobar")
+
+@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
+def test_oc_delete(mock_class):
+    mock_class.return_value = FakeClient()
+    mock_class.kind_to_resource_name.return_value = 'Route'
+
+    oc_object = {"apiVersion": "v1", "kind": "Route", "metadata": {"labels": {"name": "helloapache-route"}, "name": "helloapache-route"}, "spec": {
+        "host": "$endpoint", "to": [{"kind": "Service", "name": "helloapache-svc"}]}}
+    a = KubeOpenshiftClient(config)
+    a.delete(oc_object, "foobar")
+
+@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase")
+def test_k8s_delete(mock_class):
+    # Mock the API class
+    mock_class.return_value = FakeClient()
+    mock_class.kind_to_resource_name.return_value = 'Pod'
+
+    k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": {
+        "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}}
+
+    a = KubeOpenshiftClient(config)
+    a.delete(k8s_object, "foobar")
+
+
+class FakeOpenshiftTemplateClient():
+
+    def __init__(self, *args):
+        pass
+
+    def test_connection(self, *args):
+        pass
+
+    def get_resources(self, *args):
+        return ['Pod', 'template']
+
+    def get_groups(self, *args):
+        return {}
+
+    def request(self, method, url, data=None):
+        openshift_object = {}
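+        # Mimic OpenShift's template processing: reply with a "processed"
+        # template whose objects list contains a single Service definition.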
openshift_object['objects'] = [{"kind": "Service", "apiVersion": "v1", "metadata": {"name": "cakephp-mysql-example", "annotations": {"description": "Exposes and load balances the application pods"}}, "spec": {"ports": [{"name": "web", "port": 8080, "targetPort": 8080}], "selector": {"name": "cakephp-mysql-example"}}}] + return openshift_object, 200 + + @property + def cluster(self): + return {'server': 'https://foobar'} + + +@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") +def test_process_template(mock_class): + # Mock the API class + mock_class.return_value = FakeOpenshiftTemplateClient() + mock_class.kind_to_resource_name.return_value = 'template' + + openshift_template = {"kind": "Template", "apiVersion": "v1", "metadata": {"name": "foobar"}, "objects": [{"kind": "Service", "apiVersion": "v1", "metadata": {"name": "cakephp-mysql-example", "annotations": { + "description": "Exposes and load balances the application pods"}}, "spec": {"ports": [{"name": "web", "port": 8080, "targetPort": 8080}], "selector": {"name": "cakephp-mysql-example"}}}]} + + a = KubeOpenshiftClient(config) + a.create(openshift_template, "foobar") + a.delete(openshift_template, "foobar") diff --git a/tests/units/nulecule/artifact_xpath_test/artifacts/docker/hello-apache-pod_run b/tests/units/nulecule/artifact_xpath_test/artifacts/docker/hello-apache-pod_run new file mode 100644 index 00000000..e69de29b diff --git a/tests/units/nulecule/artifact_xpath_test/artifacts/kubernetes/hello-apache-pod.json b/tests/units/nulecule/artifact_xpath_test/artifacts/kubernetes/hello-apache-pod.json new file mode 100644 index 00000000..e69de29b diff --git a/tests/units/nulecule/invalid_nulecule/Nulecule b/tests/units/nulecule/invalid_nulecule/Nulecule new file mode 100644 index 00000000..e1e3add5 --- /dev/null +++ b/tests/units/nulecule/invalid_nulecule/Nulecule @@ -0,0 +1,37 @@ +{ + "specversion": "0.0.2", + "id": "helloapache-app", + "metadata": { + "name": "Hello Apache App", + "appversion": "0.0.1", + "description": "Atomic app for deploying a really basic Apache HTTP server" + }, + "graph": [ + { + "name": "helloapache-app", + "params": [[ + { + "name": "image", + "description": "The webserver image", + "default": "centos/httpd" + }, + { + "name": "hostport", + "description": "The host TCP port as the external endpoint", + "default": 80 + } + ], + "artifacts": { + "docker": [ + "file://artifacts/docker/hello-apache-pod_run" + ], + "kubernetes": [ + "file://artifacts/kubernetes/hello-apache-pod.json" + ], + "marathon": [ + "file://artifacts/marathon/helloapache.json" + ] + } + } + ] +} diff --git a/tests/units/nulecule/test_lib.py b/tests/units/nulecule/test_lib.py new file mode 100644 index 00000000..d313429e --- /dev/null +++ b/tests/units/nulecule/test_lib.py @@ -0,0 +1,43 @@ +import mock +import unittest + +from atomicapp.nulecule.lib import NuleculeBase +from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.config import Config + + +class TestNuleculeBaseGetProvider(unittest.TestCase): + """ Test NuleculeBase get_provider""" + def test_get_provider_success(self): + """ + Test if get_provider method when passed a particular valid key returns + the corresponding class. 
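Note that the `Nulecule` file above is malformed on purpose: the stray `[[` after `"params"` is never balanced, so the document cannot parse as JSON. This is exactly what `test_invalid_nulecule_format` (added to `test_nulecule.py` below) relies on. A quick way to see the failure, assuming the repository layout above:

```python
import json

# Raises ValueError: the "[[" after "params" makes the document invalid JSON.
try:
    with open("tests/units/nulecule/invalid_nulecule/Nulecule") as f:
        json.load(f)
except ValueError as err:
    print("invalid Nulecule: %s" % err)
```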
+ """ + nb = NuleculeBase(params = [], basepath = '', namespace = '') + provider_key = u'openshift' + # method `get_provider` will read from this config, we give it here + # since we have neither provided it before nor it is auto-generated + nb.config = Config(answers={u'general': {u'provider': provider_key}}) + + return_provider = mock.Mock() + # mocking return value of method plugin.getProvider,because it returns + # provider class and that class gets called with values + nb.plugin.getProvider = mock.Mock(return_value=return_provider) + ret_provider_key, ret_provider = nb.get_provider() + self.assertEqual(provider_key, ret_provider_key) + return_provider.assert_called_with( + {'provider': provider_key, 'namespace': 'default'}, + '', + False) + + def test_get_provider_failure(self): + """ + Test if get_provider method when passed an invalid key raises an + exception. + """ + nb = NuleculeBase(params = [], basepath = '', namespace = '') + # purposefully give the wrong provider key + provider_key = u'mesos' + nb.config = Config(answers={u'general': {u'provider': provider_key}}) + with self.assertRaises(NuleculeException): + nb.get_provider() diff --git a/tests/units/nulecule/test_nulecule.py b/tests/units/nulecule/test_nulecule.py index 106333de..13e3e648 100644 --- a/tests/units/nulecule/test_nulecule.py +++ b/tests/units/nulecule/test_nulecule.py @@ -1,6 +1,10 @@ import mock import unittest +import pytest +import os from atomicapp.nulecule.base import Nulecule +from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.config import Config class TestNuleculeRun(unittest.TestCase): @@ -12,8 +16,9 @@ def test_run(self): dryrun = False mock_component_1 = mock.Mock() mock_component_2 = mock.Mock() + config = Config(answers={}) - n = Nulecule('some-id', '0.0.2', {}, [{}], 'some/path') + n = Nulecule('some-id', '0.0.2', [{}], 'some/path', {}, config=config) n.components = [mock_component_1, mock_component_2] n.run(provider) @@ -31,7 +36,9 @@ def test_stop(self): mock_component_1 = mock.Mock() mock_component_2 = mock.Mock() - n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') + config = Config(answers={}) + + n = Nulecule('some-id', '0.0.2', {}, [], 'some/path', config=config) n.components = [mock_component_1, mock_component_2] n.stop(provider) @@ -43,23 +50,199 @@ class TestNuleculeLoadConfig(unittest.TestCase): """Test Nulecule load_config""" - def test_load_config(self): - config = {'group1': {'a': 'b'}} - mock_component_1 = mock.Mock() - mock_component_1.config = { - 'group1': {'a': 'c', 'k': 'v'}, - 'group2': {'1': '2'} - } + def test_load_config_with_default_provider(self): + """ + Test Nulecule load_config with a default provider. 
+ """ + config = Config(answers={}) - n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') - n.components = [mock_component_1] + params = [ + { + "name": "key1", + "default": "val1", + }, + { + "name": "key3", + "default": "val3" + }, + { + "name": "provider", + "default": "docker" + } + ] + + graph = [ + { + "name": "component1", + "params": [ + { + "name": "key1", + }, + { + "name": "key2", + "default": "val2" + } + ], + "artifacts": [] + } + ] + + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, + graph=graph, params=params, basepath='some/path', + config=config) + n.load_components() n.load_config(config) - self.assertEqual(n.config, { - 'group1': {'a': 'b', 'k': 'v'}, - 'group2': {'1': '2'} + self.assertEqual(n.config.runtime_answers(), { + 'general': { + 'namespace': 'default', + 'provider': 'docker', + 'key1': 'val1', + 'key3': 'val3' + }, + 'component1': { + 'key2': 'val2' + } }) + self.assertEqual( + n.components[0].config.context(scope=n.components[0].namespace), + {'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'provider': 'docker', + 'namespace': 'default'} + ) + + def test_load_config_without_default_provider(self): + """ + Test Nulecule load_config without specifying a default provider. + """ + config = Config() + + params = [ + { + "name": "key1", + "default": "val1", + }, + { + "name": "key3", + "default": "val3" + } + ] + + graph = [ + { + "name": "component1", + "params": [ + { + "name": "key1", + }, + { + "name": "key2", + "default": "val2" + } + ], + "artifacts": [] + } + ] + + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, + graph=graph, params=params, basepath='some/path', + config=config) + n.load_components() + n.load_config() + + self.assertEqual(n.config.runtime_answers(), { + 'general': { + 'namespace': 'default', + 'provider': 'kubernetes', + 'key1': 'val1', + 'key3': 'val3' + }, + 'component1': { + 'key2': 'val2' + } + }) + + self.assertEqual( + n.components[0].config.context(n.components[0].namespace), + {'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'namespace': 'default', + 'provider': 'kubernetes'} + ) + + def test_load_config_with_default_provider_overridden_by_answers(self): + """ + Test Nulecule load_config with default provider overridden by provider + in answers. 
+ """ + config = Config(answers={ + 'general': { + 'provider': 'openshift' + } + }) + + params = [ + { + "name": "key1", + "default": "val1", + }, + { + "name": "key3", + "default": "val3" + }, + { + "name": "provider", + "default": "docker" + } + ] + + graph = [ + { + "name": "component1", + "params": [ + { + "name": "key1", + }, + { + "name": "key2", + "default": "val2" + } + ], + "artifacts": [] + } + ] + + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, + graph=graph, params=params, basepath='some/path', + config=config) + n.load_components() + n.load_config(config) + + self.assertEqual(n.config.runtime_answers(), { + 'general': { + 'namespace': 'default', + 'provider': 'openshift', + 'key1': 'val1', + 'key3': 'val3' + }, + 'component1': { + 'key2': 'val2' + } + }) + + self.assertEqual( + n.components[0].config.context(n.components[0].namespace), + {'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'namespace': 'default', + 'provider': 'openshift'} + ) + class TestNuleculeLoadComponents(unittest.TestCase): @@ -81,15 +264,17 @@ def test_load_components(self, MockNuleculeComponent): } ] - n = Nulecule('some-id', '0.0.2', {}, graph, 'some/path') + config = Config(answers={}) + + n = Nulecule('some-id', '0.0.2', graph, 'some/path', config=config) n.load_components() MockNuleculeComponent.assert_any_call( graph[0]['name'], n.basepath, 'somecontainer', - graph[0]['params'], None, {}) + graph[0]['params'], None, config) MockNuleculeComponent.assert_any_call( graph[1]['name'], n.basepath, None, - graph[1].get('params'), graph[1].get('artifacts'), {}) + graph[1].get('params'), graph[1].get('artifacts'), config) class TestNuleculeRender(unittest.TestCase): @@ -110,3 +295,16 @@ def test_render(self): provider_key=provider_key, dryrun=dryrun) mock_component_2.render.assert_called_once_with( provider_key=provider_key, dryrun=dryrun) + + +class TestLoadNuleculeParsing(unittest.TestCase): + + def test_missing_nulecule(self): + n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') + with pytest.raises(NuleculeException): + n.load_from_path(src='foo/bar') + + def test_invalid_nulecule_format(self): + n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') + with pytest.raises(NuleculeException): + n.load_from_path(src=os.path.dirname(__file__) + '/invalid_nulecule/') diff --git a/tests/units/nulecule/test_nulecule_component.py b/tests/units/nulecule/test_nulecule_component.py index a61583a0..9054a2c4 100644 --- a/tests/units/nulecule/test_nulecule_component.py +++ b/tests/units/nulecule/test_nulecule_component.py @@ -1,7 +1,7 @@ -import copy import mock import unittest from atomicapp.nulecule.base import NuleculeComponent, Nulecule +from atomicapp.nulecule.config import Config from atomicapp.nulecule.exceptions import NuleculeException @@ -88,7 +88,7 @@ def test_run_local_artifacts(self, mock_get_provider): mock_get_provider.assert_called_once_with(provider_key, dryrun) self.assertEqual(mock_provider.artifacts, ['a', 'b', 'c']) mock_provider.init.assert_called_once_with() - mock_provider.deploy.assert_called_once_with() + mock_provider.run.assert_called_once_with() class TestNuleculeComponentStop(unittest.TestCase): @@ -120,7 +120,7 @@ def test_stop_local_app(self, mock_get_provider): mock_get_provider.assert_called_once_with(provider_key, dryrun) self.assertEqual(mock_provider.artifacts, ['a', 'b', 'c']) mock_provider.init.assert_called_once_with() - mock_provider.undeploy.assert_called_once_with() + mock_provider.stop.assert_called_once_with() class 
diff --git a/tests/units/nulecule/test_nulecule_component.py b/tests/units/nulecule/test_nulecule_component.py
index a61583a0..9054a2c4 100644
--- a/tests/units/nulecule/test_nulecule_component.py
+++ b/tests/units/nulecule/test_nulecule_component.py
@@ -1,7 +1,7 @@
-import copy
 import mock
 import unittest
 from atomicapp.nulecule.base import NuleculeComponent, Nulecule
+from atomicapp.nulecule.config import Config
 from atomicapp.nulecule.exceptions import NuleculeException
 
 
@@ -88,7 +88,7 @@ def test_run_local_artifacts(self, mock_get_provider):
         mock_get_provider.assert_called_once_with(provider_key, dryrun)
         self.assertEqual(mock_provider.artifacts, ['a', 'b', 'c'])
         mock_provider.init.assert_called_once_with()
-        mock_provider.deploy.assert_called_once_with()
+        mock_provider.run.assert_called_once_with()
 
 
 class TestNuleculeComponentStop(unittest.TestCase):
@@ -120,7 +120,7 @@ def test_stop_local_app(self, mock_get_provider):
         mock_get_provider.assert_called_once_with(provider_key, dryrun)
         self.assertEqual(mock_provider.artifacts, ['a', 'b', 'c'])
         mock_provider.init.assert_called_once_with()
-        mock_provider.undeploy.assert_called_once_with()
+        mock_provider.stop.assert_called_once_with()
 
 
 class TestNuleculeComponentLoadConfig(unittest.TestCase):
@@ -129,49 +129,52 @@ class TestNuleculeComponentLoadConfig(unittest.TestCase):
 
     def test_load_config_local_app(self):
         """Test load config for local app"""
         params = [
-            {'name': 'key1'},
-            {'name': 'key2'}
+            {'name': 'key1', 'description': 'key1'},
+            {'name': 'key2', 'description': 'key2'}
         ]
         initial_config = {
             'general': {'a': 'b', 'key2': 'val2'},
             'some-app': {'key1': 'val1'}
         }
-
-        nc = NuleculeComponent('some-app', 'some/path', params=params)
-        nc.load_config(config=copy.deepcopy(initial_config))
-
-        self.assertEqual(nc.config, {
-            'general': {'a': 'b', 'key2': 'val2'},
-            'some-app': {'key1': 'val1', 'key2': 'val2'}
+        conf = Config(answers=initial_config)
+
+        nc = NuleculeComponent('some-app', 'some/path',
+                               params=params, config=conf)
+        nc.load_config()
+        runtime_answers = nc.config.runtime_answers()
+        self.assertEqual(runtime_answers, {
+            'general': {
+                'a': 'b',
+                'key2': 'val2',
+                'provider': 'kubernetes',
+                'namespace': 'default'
+            },
+            'some-app': {'key1': 'val1'}
         })
 
-    @mock.patch('atomicapp.nulecule.base.NuleculeComponent.merge_config')
-    def test_load_config_external_app(self, mock_merge_config):
+    def test_load_config_external_app(self):
         """Test load config for external app"""
-        mock_nulecule = mock.Mock(
-            name='nulecule',
-            spec=Nulecule('some-id', '0.0.2', {}, [], 'some/path')
-        )
         params = [
-            {'name': 'key1'},
-            {'name': 'key2'}
+            {'name': 'key1', 'description': 'key1'},
+            {'name': 'key2', 'description': 'key2'}
        ]
         initial_config = {
             'general': {'a': 'b', 'key2': 'val2'},
             'some-app': {'key1': 'val1'}
         }
+        config = Config(answers=initial_config)
+        mock_nulecule = mock.Mock(
+            name='nulecule',
+            spec=Nulecule('some-id', '0.0.2', config, [], 'some/path')
+        )
 
         nc = NuleculeComponent('some-app', 'some/path', params=params)
         nc._app = mock_nulecule
-        nc.load_config(config=copy.deepcopy(initial_config))
+        nc.config = config
+        nc.load_config()
 
         mock_nulecule.load_config.assert_called_once_with(
-            config={
-                'general': {'a': 'b', 'key2': 'val2'},
-                'some-app': {'key1': 'val1', 'key2': 'val2'}
-            }, ask=False, skip_asking=False)
-        mock_merge_config.assert_called_once_with(
-            nc.config, mock_nulecule.config)
+            config=config, ask=False, skip_asking=False)
 
 
 class TestNuleculeComponentLoadExternalApplication(unittest.TestCase):
@@ -193,24 +196,30 @@ def test_loading_existing_app(self, mock_os_path_isdir, mock_Nulecule):
         mock_os_path_isdir.assert_called_once_with(
             expected_external_app_path)
         mock_Nulecule.load_from_path.assert_called_once_with(
-            expected_external_app_path, dryrun=dryrun, namespace='some-app',
+            expected_external_app_path, dryrun=dryrun, namespace='some-app',
             update=update)
 
+    # Check call_count explicitly instead of relying on assert_called_once
+    # (see http://engineeringblog.yelp.com/2015/02/assert_called_once-threat-or-menace.html).
+    # Utils.setFileOwnerGroup is patched out so that its return value of
+    # False cannot interfere with the unpack path under test.
     @mock.patch('atomicapp.nulecule.base.Nulecule')
     @mock.patch('atomicapp.nulecule.base.os.path.isdir')
+    @mock.patch('atomicapp.utils.Utils.setFileOwnerGroup')
-    def test_loading_app_by_unpacking(self, mock_os_path_isdir,
-                                      mock_Nulecule):
+    def test_loading_app_by_unpacking(self, mock_chown, mock_os_path_isdir,
+                                      mock_Nulecule):
         dryrun, update = False, False
         mock_os_path_isdir.return_value = False
+        mock_chown.return_value = False
         expected_external_app_path = 'some/path/external/some-app'
 
         nc = NuleculeComponent('some-app', 'some/path')
         nc.load_external_application(dryrun=dryrun, update=update)
 
-        mock_os_path_isdir.assert_called_once_with(
-            expected_external_app_path)
-        mock_Nulecule.unpack.assert_called_once_with(
+        mock_os_path_isdir.assert_called_with(expected_external_app_path)
+        mock_Nulecule.unpack.assert_called_with(
             nc.source, expected_external_app_path, namespace=nc.namespace,
             config=None, dryrun=dryrun, update=update)
+        assert mock_os_path_isdir.call_count == 1
+        assert mock_Nulecule.unpack.call_count == 1
 
 
 class TestNuleculeComponentComponents(unittest.TestCase):
@@ -238,7 +247,7 @@ def test_render_for_external_app(self):
         provider_key = 'some-provider'
         dryrun = False
 
-        nc = NuleculeComponent(name='some-app', basepath='some/path')
+        nc = NuleculeComponent(name='some-app', basepath='some/path', artifacts="/foo/bar")
         nc._app = mock_nulecule
         nc.render(provider_key, dryrun)
 
@@ -254,42 +263,60 @@ def test_render_for_local_app_with_missing_artifacts_for_provider(self):
         dryrun = False
 
         nc = NuleculeComponent(name='some-app', basepath='some/path')
-        nc.config = {}
+        nc.config = Config()
         nc.artifacts = {'x': ['some-artifact']}
 
         self.assertRaises(NuleculeException, nc.render, provider_key, dryrun)
 
-    @mock.patch('atomicapp.nulecule.base.NuleculeComponent.get_context')
+    def test_render_for_local_app_with_missing_artifacts_from_nulecule(self):
+        """
+        Test rendering a Nulecule component with no artifacts provided in the
+        Nulecule file.
+        """
+        nc = NuleculeComponent(name='some-app', basepath='some/path')
+        nc.config = {}
+
+        with self.assertRaises(NuleculeException):
+            nc.render()
+
     @mock.patch('atomicapp.nulecule.base.NuleculeComponent.'
                 'get_artifact_paths_for_provider')
     @mock.patch('atomicapp.nulecule.base.NuleculeComponent.render_artifact')
     def test_render_for_local_app_with_artifacts_for_provider(
-            self, mock_render_artifact, mock_get_artifact_paths_for_provider,
-            mock_get_context):
+            self, mock_render_artifact, mock_get_artifact_paths_for_provider):
         """Test rendering artifacts for a local Nulecule component"""
         provider_key = 'some-provider'
         dryrun = False
         expected_rendered_artifacts = [
             'some/path/.artifact1', 'some/path/.artifact2']
-        context = {'a': 'b'}
 
         mock_get_artifact_paths_for_provider.return_value = [
             'some/path/artifact1', 'some/path/artifact2']
         mock_render_artifact.side_effect = lambda path, context, provider: path.replace('artifact', '.artifact')
-        mock_get_context.return_value = context
 
         nc = NuleculeComponent(name='some-app', basepath='some/path')
-        nc.config = {'general': {'key1': 'val1'}, 'some-provider': {'a': 'b'}}
+        nc.config = Config(answers={
+            'general': {'key1': 'val1'},
+            'some-provider': {'a': 'b'}
+        })
         nc.artifacts = {
             'some-provider': ['artifact1', 'artifact2'],
             'x': ['foo']
         }
         nc.render(provider_key, dryrun)
 
+        expected_context = {
+            'key1': 'val1',
+            'namespace': 'default',
+            'provider': 'kubernetes'
+        }
         mock_get_artifact_paths_for_provider.assert_called_once_with(
             provider_key)
-        mock_render_artifact.assert_any_call('some/path/artifact1', context,
+        mock_render_artifact.assert_any_call('some/path/artifact1',
+                                             expected_context,
                                              'some-provider')
-        mock_render_artifact.assert_any_call('some/path/artifact2', context,
+        mock_render_artifact.assert_any_call('some/path/artifact2',
+                                             expected_context,
                                              'some-provider')
         mock_get_artifact_paths_for_provider.assert_called_once_with(
             provider_key)
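The comment in the unpacking test above points at a real pitfall: with the mock library of this era, a misspelled assertion such as `assert_called_once` (without `_with`) is just an auto-created attribute on the mock and silently passes. A small, self-contained demonstration of the safer explicit checks:

```python
import mock

m = mock.Mock()
m.unpack("src", "dest")

# A misspelled m.unpack.assert_called_once would be an auto-created Mock
# attribute and silently pass. Explicit checks cannot fail silently:
assert m.unpack.call_count == 1
m.unpack.assert_called_once_with("src", "dest")
```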
diff --git a/tests/units/nulecule/test_xpathing.py b/tests/units/nulecule/test_xpathing.py
index 5a4cbba3..f9666469 100644
--- a/tests/units/nulecule/test_xpathing.py
+++ b/tests/units/nulecule/test_xpathing.py
@@ -1,5 +1,5 @@
 """
- Copyright 2015 Red Hat, Inc.
+ Copyright 2014-2016 Red Hat, Inc.
 
 This file is part of Atomic App.
@@ -35,10 +35,10 @@ class TestNuleculeXpathing(unittest.TestCase):
 
     # Create a temporary directory for our setup as well as load the required NuleculeComponent
     def setUp(self):
-        self.tmpdir = tempfile.mkdtemp(prefix = "atomicapp-test", dir = "/tmp")
+        self.example_dir = os.path.dirname(__file__) + '/artifact_xpath_test/'
         self.artifact_path = os.path.dirname(__file__) + '/artifact_xpath_test/xpath.json'
         self.artifact_content = open(self.artifact_path, 'r').read();
-        self.test = NuleculeComponent(name = None, basepath = self.tmpdir, params = None)
+        self.test = NuleculeComponent(name = None, basepath = self.example_dir, params = None)
 
     def tearDown(self):
         pass
diff --git a/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule b/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule
index ca94bbb3..a6f72274 100644
--- a/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule
+++ b/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule
@@ -23,9 +23,9 @@ graph:
   requirements:
   - persistentVolume:
       name: "var-lib-mongodb-data"
-      accessMode: "ReadWrite"
+      accessMode: "ReadWriteOnce"
       size: 4
   - persistentVolume:
       name: "var-log-mongodb"
-      accessMode: "ReadWrite"
+      accessMode: "ReadWriteOnce"
       size: 4
diff --git a/tests/units/persistent_storage/test_ps.py b/tests/units/persistent_storage/test_ps.py
index 5afb0217..2900fe86 100644
--- a/tests/units/persistent_storage/test_ps.py
+++ b/tests/units/persistent_storage/test_ps.py
@@ -10,8 +10,8 @@ class TestPersistentStorage(unittest.TestCase):
     def setUp(self):
         config = {'helloapache-app': {'image': 'centos/httpd', 'hostport': 80},
                   'general': {'namespace': 'default', 'provider': 'kubernetes'}}
-        graph = [{'persistentVolume': {'accessMode': 'ReadWrite', 'name': 'var-lib-mongodb-data', 'size': 4}},
-                 {'persistentVolume': {'accessMode': 'ReadWrite', 'name': 'var-log-mongodb', 'size': 4}}]
+        graph = [{'persistentVolume': {'accessMode': 'ReadWriteOnce', 'name': 'var-lib-mongodb-data', 'size': 4}},
+                 {'persistentVolume': {'accessMode': 'ReadWriteOnce', 'name': 'var-log-mongodb', 'size': 4}}]
         self.tmpdir = tempfile.mkdtemp(prefix="atomicapp-test", dir="/tmp")
         self.test = Requirements(
             config=config, basepath=self.tmpdir, graph=graph, provider="kubernetes", dryrun=True)
@@ -31,6 +31,3 @@ def test_run(self):
 
     def test_stop(self):
         self.test.stop()
-
-    def test_uninstall(self):
-        self.test.uninstall()
diff --git a/tests/units/persistent_storage/test_ps_cli.py b/tests/units/persistent_storage/test_ps_cli.py
index 24692bc1..6093fb12 100644
--- a/tests/units/persistent_storage/test_ps_cli.py
+++ b/tests/units/persistent_storage/test_ps_cli.py
@@ -1,5 +1,5 @@
 """
- Copyright 2015 Red Hat, Inc.
+ Copyright 2014-2016 Red Hat, Inc.
 
 This file is part of Atomic App.
diff --git a/tests/units/providers/docker_artifact_test/run-with-backslashes b/tests/units/providers/docker_artifact_test/run-with-backslashes
new file mode 100644
index 00000000..6dda45e1
--- /dev/null
+++ b/tests/units/providers/docker_artifact_test/run-with-backslashes
@@ -0,0 +1,6 @@
+docker run \
+-d \
+-p \
+80:80 \
+--name centos7 \
+centos7
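This artifact feeds `test_docker_run_with_backslashes` in the next diff: the Docker provider is expected to collapse the shell-style line continuations into the single command asserted there. One plausible way to do the flattening (a sketch, not the provider's actual implementation):

```python
def flatten_continuations(artifact_text):
    """Join backslash-continued lines into one shell command line."""
    parts = [line.rstrip("\\").strip() for line in artifact_text.splitlines()]
    return " ".join(p for p in parts if p)


artifact = "docker run \\\n-d \\\n-p \\\n80:80 \\\n--name centos7 \\\ncentos7\n"
assert flatten_continuations(artifact) == \
    "docker run -d -p 80:80 --name centos7 centos7"
```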
diff --git a/tests/units/providers/test_docker_provider.py b/tests/units/providers/test_docker_provider.py
index 20c9d661..c0047d07 100644
--- a/tests/units/providers/test_docker_provider.py
+++ b/tests/units/providers/test_docker_provider.py
@@ -1,5 +1,5 @@
 """
- Copyright 2015 Red Hat, Inc.
+ Copyright 2014-2016 Red Hat, Inc.
 
 This file is part of Atomic App.
@@ -66,7 +66,7 @@ def test_multiple_artifact_load(self):
             ["test_fedora-httpd_e9b9a7bfe8f9", "test_centos-httpd_e9b9a7bfe8f9", "test_centos-httpd_e9b9a7bfe8f9"]
         ])
         with mock.patch("atomicapp.providers.docker.DockerProvider._get_containers", mock_container_list):
-            provider.deploy()
+            provider.run()
 
     # Patch in a general container list and make sure it fails if there is already a container with the same name
@@ -77,4 +77,16 @@ def test_namespace_name_check(self):
         provider.init()
         provider.artifacts = [self.artifact_dir + 'hello-world-one']
         with pytest.raises(ProviderFailedException):
-            provider.deploy()
+            provider.run()
+
+    def test_docker_run_with_backslashes(self):
+        data = {'namespace': 'test', 'provider': 'docker'}
+        provider = self.prepare_provider(data)
+        provider.init()
+        provider.artifacts = [
+            self.artifact_dir + 'run-with-backslashes',
+        ]
+        expected_output = 'docker run -d -p 80:80 --name centos7 centos7'
+        with mock.patch('atomicapp.providers.docker.logger') as mock_logger:
+            provider.run()
+            mock_logger.info.assert_called_with('DRY-RUN: %s', expected_output)
diff --git a/tests/units/providers/test_kubernetes_provider.py b/tests/units/providers/test_kubernetes_provider.py
index 97b94ed3..651ceceb 100644
--- a/tests/units/providers/test_kubernetes_provider.py
+++ b/tests/units/providers/test_kubernetes_provider.py
@@ -1,5 +1,5 @@
 """
- Copyright 2015 Red Hat, Inc.
+ Copyright 2014-2016 Red Hat, Inc.
 
 This file is part of Atomic App.
@@ -28,9 +28,6 @@
 
 MOCK_CONTENT = "mock_provider_call_content"
 
-def mock_provider_call(self, cmd):
-    return MOCK_CONTENT
-
 
 class TestKubernetesProviderBase(unittest.TestCase):
     # Create a temporary directory for our setup as well as load the required providers
@@ -53,14 +50,13 @@ def prepare_provider(self, data):
         return provider
 
     # Check that the provider configuration file exists
-    @mock.patch.object(KubernetesProvider, '_call', mock_provider_call)
     def test_provider_config_exist(self):
         provider_config_path = self.create_temp_file()
         mock_content = "%s_%s" % (MOCK_CONTENT, "_unchanged")
         with open(provider_config_path, "w") as fp:
             fp.write(mock_content)
 
-        data = {'namespace': 'testing', 'provider': 'kubernetes', 'providerconfig': provider_config_path}
+        data = {'namespace': 'testing', 'provider': 'kubernetes', 'provider-config': provider_config_path}
 
         provider = self.prepare_provider(data)
diff --git a/tests/units/providers/test_openshift_provider.py b/tests/units/providers/test_openshift_provider.py
deleted file mode 100644
index 0d092475..00000000
--- a/tests/units/providers/test_openshift_provider.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Unittests for atomicapp/providers/openshift.py
-
-We test most functionalities of OpenshiftProvider by
-mocking out OpenshiftClient which interacts with
-the external world openshift and kubernetes API.
-""" - -import unittest -import mock -from atomicapp.providers.openshift import OpenShiftProvider -from atomicapp.plugin import ProviderFailedException - - -class OpenshiftProviderTestMixin(object): - - def setUp(self): - # Patch OpenshiftClient to test OpenShiftProvider - self.patcher = mock.patch('atomicapp.providers.openshift.OpenshiftClient') - self.mock_OpenshiftClient = self.patcher.start() - self.mock_oc = self.mock_OpenshiftClient() - - def get_oc_provider(self, dryrun=False, artifacts=[]): - """ - Get OpenShiftProvider instance - """ - op = OpenShiftProvider({}, '.', dryrun) - op.artifacts = artifacts - op.access_token = 'test' - op.init() - return op - - def tearDown(self): - self.patcher.stop() - - -class TestOpenshiftProviderDeploy(OpenshiftProviderTestMixin, unittest.TestCase): - """ - Test OpenShiftProvider.deploy - """ - - def test_deploy(self): - """ - Test calling OpenshiftClient.deploy from OpenShiftProvider.deploy - """ - op = self.get_oc_provider() - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - - op.deploy() - - self.mock_oc.deploy.assert_called_once_with( - 'namespaces/foo/pods/?access_token=test', - op.openshift_artifacts['pods'][0]) - - def test_deploy_dryrun(self): - """ - Test running OpenShiftProvider.deploy as dryrun - """ - op = self.get_oc_provider(dryrun=True) - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - - op.deploy() - - self.assertFalse(self.mock_oc.deploy.call_count) - -class TestOpenshiftProviderUndeploy(OpenshiftProviderTestMixin, unittest.TestCase): - """ - Test OpenShiftProvider.undeploy - """ - - def test_undeploy(self): - """ - Test calling OpenshiftClient.delete from OpenShiftProvider.undeploy - """ - op = self.get_oc_provider() - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'kind': 'Pod', - 'metadata': { - 'name': 'bar', - 'namespace': 'foo' - } - } - ] - } - - op.undeploy() - - self.mock_oc.delete.assert_called_once_with( - 'namespaces/foo/pods/%s?access_token=test' % - op.openshift_artifacts['pods'][0]['metadata']['name']) - - def test_undeploy_dryrun(self): - """ - Test running OpenShiftProvider.undeploy as dryrun - """ - op = self.get_oc_provider(dryrun=True) - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'kind': 'Pod', - 'metadata': { - 'name': 'bar', - 'namespace': 'foo' - } - } - ] - } - - op.deploy() - - self.assertFalse(self.mock_oc.delete.call_count) - -class TestOpenshiftProviderProcessArtifactData(OpenshiftProviderTestMixin, unittest.TestCase): - """ - Test processing Openshift artifact data - """ - - def test_process_artifact_data_non_template_kind(self): - """ - Test processing non template artifact data - """ - artifact_data = { - 'kind': 'Pod', - 'pods': [ - { - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - self.mock_oc.get_oapi_resources.return_value = ['pods'] - - op = self.get_oc_provider() - - op._process_artifact_data('foo', artifact_data) - - self.assertEqual(op.openshift_artifacts, - {'pod': [artifact_data]}) - - def test_process_artifact_data_template_kind(self): - """ - Test processing non template artifact data - """ - artifact_data = { - 'kind': 'Template', - 'objects': [ - { - 'kind': 'Pod', - 'metadata': { - 'namespace': 'foo' - } - }, - { - 'kind': 'Service', - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - self.mock_oc.get_oapi_resources.return_value = ['templates'] - op = 
self.get_oc_provider() - self.mock_oc.process_template.return_value = artifact_data['objects'] - - op._process_artifact_data('foo', artifact_data) - - self.assertEqual( - op.openshift_artifacts, { - 'pod': [ - {'kind': 'Pod', 'metadata': {'namespace': 'foo'}} - ], - 'service': [ - {'kind': 'Service', 'metadata': {'namespace': 'foo'}} - ] - } - ) - - def test_process_artifact_data_error_resource_not_in_resources(self): - """ - Test processing artifact data with kind not in resources - """ - artifact_data = { - 'kind': 'foobar' - } - - op = self.get_oc_provider() - - self.assertRaises( - ProviderFailedException, - op._process_artifact_data, 'foo', artifact_data) - - def test_process_artifact_data_error_kind_key_missing(self): - """ - Test processing artifact data with missing key 'kind' - """ - artifact_data = {} - op = self.get_oc_provider() - - self.assertRaises( - ProviderFailedException, - op._process_artifact_data, 'foo', artifact_data) - - -class TestOpenshiftProviderParseKubeconfData(OpenshiftProviderTestMixin, unittest.TestCase): - - def test_parse_kubeconf_data_insecure(self): - """ - Test parsing kubeconf data with current context containing - cluster, user, namespace info and skipping tls verification - """ - kubecfg_data = { - 'current-context': 'context2', - 'contexts': [ - { - 'name': 'context1', - }, - { - 'name': 'context2', - 'context': { - 'cluster': 'cluster1', - 'user': 'user1', - 'namespace': 'namespace1' - } - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'insecure-skip-tls-verify': 'true', - 'server': 'server1' - } - } - ], - 'users': [ - { - 'name': 'user1', - 'user': { - 'token': 'token1' - } - } - ] - } - - op = self.get_oc_provider() - self.assertEqual(op._parse_kubeconf_data(kubecfg_data), - {'providerapi': 'server1', - 'accesstoken': 'token1', - 'namespace': 'namespace1', - 'providertlsverify': False, - 'providercafile': None}) - - def test_parse_kubeconf_data_cafile(self): - """ - Test parsing kubeconf data with current context containing - cluster, user, namespace info and certificate-authority - """ - kubecfg_data = { - 'current-context': 'context2', - 'contexts': [ - { - 'name': 'context1', - }, - { - 'name': 'context2', - 'context': { - 'cluster': 'cluster1', - 'user': 'user1', - 'namespace': 'namespace1' - } - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'certificate-authority': '/foo/bar', - 'server': 'server1' - } - } - ], - 'users': [ - { - 'name': 'user1', - 'user': { - 'token': 'token1' - } - } - ] - } - - op = self.get_oc_provider() - self.assertEqual(op._parse_kubeconf_data(kubecfg_data), - {'providerapi': 'server1', - 'accesstoken': 'token1', - 'namespace': 'namespace1', - 'providertlsverify': True, - 'providercafile': '/foo/bar'}) - - def test_parse_kubeconf_data_no_context(self): - """ - Test parsing kubeconf data with missing context data for - current context. - """ - kubecfg_data = { - 'current-context': 'context2', - 'contexts': [ - { - 'name': 'context1', - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'server': 'server1' - } - } - ], - 'users': [ - { - 'name': 'user1', - 'user': { - 'token': 'token1' - } - } - ] - } - - op = self.get_oc_provider() - self.assertRaises(ProviderFailedException, - op._parse_kubeconf_data, kubecfg_data) - - def test_parse_kubeconf_data_no_user(self): - """ - Test parsing kubeconf data with missing user data in current - context. 
- """ - kubecfg_data = { - 'current-context': 'context2', - 'contexts': [ - { - 'name': 'context1', - }, - { - 'name': 'context2', - 'context': { - 'cluster': 'cluster1', - 'user': 'user1', - 'namespace': 'namespace1' - } - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'server': 'server1' - } - } - ], - 'users': [ - ] - } - - op = self.get_oc_provider() - self.assertRaises(ProviderFailedException, - op._parse_kubeconf_data, kubecfg_data) diff --git a/tests/units/test_plugin.py b/tests/units/test_plugin.py new file mode 100644 index 00000000..614d1c2b --- /dev/null +++ b/tests/units/test_plugin.py @@ -0,0 +1,30 @@ +import mock +import unittest + +from atomicapp.plugin import Plugin +from atomicapp.providers.docker import DockerProvider +from atomicapp.providers.kubernetes import KubernetesProvider + +class TestPluginGetProvider(unittest.TestCase): + + """Test Plugin getProvider""" + def test_getProvider(self): + """ + Test if getProvider is returning appropriate classes to the + corresponding keys. + """ + p = Plugin() + + docker_mock = DockerProvider + kubernetes_mock = KubernetesProvider + # keep some mock objects in place of the actual corresponding + # classes, getProvider reads from `plugins` dict. + p.plugins = { + 'docker': docker_mock, + 'kubernetes': kubernetes_mock, + } + self.assertEqual(p.getProvider('docker'), docker_mock) + self.assertEqual(p.getProvider('kubernetes'), kubernetes_mock) + + # if non-existent key provided + self.assertEqual(p.getProvider('some_random'), None) diff --git a/tests/units/test_utils.py b/tests/units/test_utils.py new file mode 100644 index 00000000..dac1b1e8 --- /dev/null +++ b/tests/units/test_utils.py @@ -0,0 +1,19 @@ +import unittest +import os +import tempfile + +from atomicapp.utils import Utils + + +class TestUtils(unittest.TestCase): + + def setUp(self): + self.tmpdir = tempfile.mkdtemp(prefix="atomicapp-test-utils", dir="/tmp") + self.tmpfile = open(os.path.join(self.tmpdir, 'test.txt'), 'w+') + + def test_setFileOwnerGroup(self): + """ + Use the function to set the file owner ship + """ + u = Utils + u.setFileOwnerGroup(self.tmpdir)