diff --git a/.gitignore b/.gitignore index 7430aae3..38a7623a 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ _site # Other # ######### +*.json migrations/ make_video.sh celerybeat-schedule.db diff --git a/CHANGELOG.md b/CHANGELOG.md index d69a0407..eb38fcad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ represented by the pull requests that fixed them. Critical items to know are: ## [master](https://github.com/singularityhub/sregistry/tree/master) (master) + - Addition of Google Cloud Build, versioning, tags to collections (1.1.0) - adding BitBucket authentication backend - updating sregistry-cli to 0.0.97, catching OSError earlier - updating sregistry-cli to 0.0.96, and Singularity download url to use sylabs organization diff --git a/Dockerfile b/Dockerfile index c92ccb54..49fa0269 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,11 +33,6 @@ RUN apt-get update && apt-get install -y \ squashfs-tools \ build-essential -# Install Singularity -RUN git clone -b vault/release-2.6 https://www.github.com/sylabs/singularity.git -WORKDIR singularity -RUN ./autogen.sh && ./configure --prefix=/usr/local && make && make install - # Install Python requirements out of /tmp so not triggered if other contents of /code change ADD requirements.txt /tmp/requirements.txt RUN pip install --upgrade pip @@ -47,7 +42,7 @@ ADD . /code/ ################################################################################ # PLUGINS -# You are free to comment out those plugins that you don't want to use +# You are free to uncomment the plugins that you want to use # Install LDAP (uncomment if wanted) # RUN pip install python3-ldap @@ -56,6 +51,9 @@ ADD . 
/code/ # Install PAM Authentication (uncomment if wanted) # RUN pip install django-pam +# Ensure Google Build Installed +# RUN pip install sregistry[google-build] + # Install Globus (uncomment if wanted) # RUN /bin/bash /code/scripts/globus/globus-install.sh diff --git a/README.md b/README.md index b60dd36d..02275471 100644 --- a/README.md +++ b/README.md @@ -16,9 +16,10 @@ to work together using [docker-compose.yml](docker-compose.yml). The images are the following: - **vanessa/sregistry**: is the main uwsgi application, which serves a Django (python-based) application. - - **nginx**: pronounced (engine-X) is the webserver. The starter application is configured for http, however you should follow the instructions to set up https properly. - - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [celery](http://www.celeryproject.org/), a distributed job queue that fits nicely into Django. The celery worker uses a + - **nginx**: pronounced (engine-X) is the webserver. The starter application is configured for http, however you should follow the instructions to set up https properly. Note that we build a custom nginx image that takes advantage of the [nginx upload module](https://www.nginx.com/resources/wiki/modules/upload/). + - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [django-rq](https://github.com/rq/django-rq) that uses a - **redis**: database to organize the jobs themselves. + - **scheduler** jobs can be scheduled using the scheduler. For more information about Singularity Registry Server, please reference the [docs](https://singularityhub.github.io/sregistry). 
If you have any issues, diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..9084fa2f --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.1.0 diff --git a/docker-compose.yml b/docker-compose.yml index dc7f2a55..c3365e6b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,13 +32,21 @@ nginx: redis: restart: always image: redis:latest - ports: - - "6379:6379" +scheduler: + image: vanessa/sregistry + command: python /code/manage.py rqscheduler + volumes: + - .:/code + volumes_from: + - uwsgi + links: + - redis + - db worker: image: vanessa/sregistry - command: celery worker -A shub.celery -Q default -n default@%h -B + command: python /code/manage.py rqworker default volumes: - .:/code volumes_from: diff --git a/docs/assets/img/api-docs.png b/docs/assets/img/api-docs.png new file mode 100644 index 00000000..8a302030 Binary files /dev/null and b/docs/assets/img/api-docs.png differ diff --git a/docs/assets/img/api-swagger.png b/docs/assets/img/api-swagger.png new file mode 100644 index 00000000..400fc174 Binary files /dev/null and b/docs/assets/img/api-swagger.png differ diff --git a/docs/assets/img/google-build-collection.png b/docs/assets/img/google-build-collection.png new file mode 100644 index 00000000..62669dd6 Binary files /dev/null and b/docs/assets/img/google-build-collection.png differ diff --git a/docs/assets/img/google-build-connect-github.png b/docs/assets/img/google-build-connect-github.png new file mode 100644 index 00000000..fb3246d8 Binary files /dev/null and b/docs/assets/img/google-build-connect-github.png differ diff --git a/docs/assets/img/google-build-new-collection.png b/docs/assets/img/google-build-new-collection.png new file mode 100644 index 00000000..3c5a8eea Binary files /dev/null and b/docs/assets/img/google-build-new-collection.png differ diff --git a/docs/assets/img/google-build-repos.png b/docs/assets/img/google-build-repos.png new file mode 100644 index 00000000..c25b57f9 Binary files /dev/null and 
b/docs/assets/img/google-build-repos.png differ diff --git a/docs/assets/img/google-cloud-build-hash.png b/docs/assets/img/google-cloud-build-hash.png new file mode 100644 index 00000000..db16ae43 Binary files /dev/null and b/docs/assets/img/google-cloud-build-hash.png differ diff --git a/docs/assets/img/google-cloud-build-steps.png b/docs/assets/img/google-cloud-build-steps.png new file mode 100644 index 00000000..9002d96a Binary files /dev/null and b/docs/assets/img/google-cloud-build-steps.png differ diff --git a/docs/assets/img/phyllis-latour-doyle-ww2-blog-web.jpg b/docs/assets/img/phyllis-latour-doyle-ww2-blog-web.jpg new file mode 100644 index 00000000..441e5cf8 Binary files /dev/null and b/docs/assets/img/phyllis-latour-doyle-ww2-blog-web.jpg differ diff --git a/docs/pages/client.md b/docs/pages/client.md index fa1cacf5..0a80aa74 100644 --- a/docs/pages/client.md +++ b/docs/pages/client.md @@ -6,15 +6,29 @@ permalink: /client toc: false --- - - +## Singularity Pull + +Singularity Registry Server implements a basic version of the Sylabs Library API, +meaning that you can pull a container with Singularity directly. For example, +let's say that I have a collection with a container called `collection/container:tag`. +and my registry is served at `containers.page`. I could pull it as follows: + +```bash +$ singularity pull --library https://containers.page collection/container:tag +``` + +# SRegistry Client + +Singularity Registry Global Client, or [sregistry-cli](https://github.com/singularityhub/sregistry-cli), +is a general client to interact with Singularity images at remote endpoints, and it provides +such an endpoint for Singularity Registry Server. We will provide +basic instructions here, and for the full documentation, please see the [getting started guide here](https://singularityhub.github.io/sregistry-cli/client-registry). 
Note that you will need to [export your credentials](https://singularityhub.github.io/sregistry/credentials) in order to have authenticated interaction with sregistry. -The original Singularity Registry Client was provided by [Singularity Python](https://github.com/singularityware/singularity-python), however we have moved the client to have its own module under [sregistry-cli](https://github.com/singularityhub/sregistry-cli). We recommend that you use the latter, and ask for features or updates when necessary. For the new version, see the [getting started guide here](https://singularityhub.github.io/sregistry-cli/client-registry). Note that you will need to [export your credentials](https://singularityhub.github.io/sregistry/credentials) in order to have authenticated -interaction with sregistry. ## Install ### sregistry Installation + `sregistry` is the client for Singularity Registry server. To install, you can do the following: ``` diff --git a/docs/pages/install/builders.md b/docs/pages/install/builders.md new file mode 100644 index 00000000..b283a172 --- /dev/null +++ b/docs/pages/install/builders.md @@ -0,0 +1,18 @@ +--- +layout: default +title: Custom Builders and Storage +pdf: true +permalink: /install-builders +toc: false +--- + +Currently, we support custom installation of the following builder and storage pairs. Each of these is provided +as a plugin, so you can enable them in the same way. Instructions are included with the links below. + + - [Google Cloud Build + Storage]({{ site.baseurl }}/plugin-google-build) + +Don't forget to go back to the [install docs](https://singularityhub.github.io/sregistry/install-server#storage) where you left off. + +
+ +

diff --git a/docs/pages/install/containers.md b/docs/pages/install/containers.md index d4eb3df1..7a5272a1 100644 --- a/docs/pages/install/containers.md +++ b/docs/pages/install/containers.md @@ -8,12 +8,13 @@ toc: true # Installation: Start Containers -Whether you build or not, the compose command will bring up the application (and download containers provided on Docker Hub, if they aren't in your cache). +Whether you build or not, the compose command will bring up the application +(and download containers provided on Docker Hub, if they aren't in your cache). ## What containers are provided? -Singularity Registy Server uses the following images, all provided on Docker Hub (or you can build the registry-specific ones -locally): +Singularity Registry Server uses the following images, all provided on Docker Hub +(or you can build the registry-specific ones locally): - [vanessa/sregistry](https://hub.docker.com/r/vanessa/sregistry): is the core application image, generated from the Dockerfile in the base of the repository. - [vanessa/sregistry_nginx](https://hub.docker.com/r/vanessa/sregistry_nginx/): Is the nginx container installed with the nginx upload module, intended for use with speedy uploads. It is generated from the subfolder "nginx" in the repository. @@ -23,17 +24,24 @@ To use these images provided, you can bring up the containers like so: ## Start Containers ```bash -docker-compose up -d +$ docker-compose up -d ``` -The `-d` means detached, and that you won't see any output (or errors) to the console. You can easily restart and stop containers, either specifying the container name(s) or leaving blank to apply to all containers. Note that these commands must be run in the folder with the `docker-compose.yml`: +The `-d` means detached, and that you won't see any output (or errors) to the +console. You can easily restart and stop containers, either specifying the +container name(s) or leaving blank to apply to all containers. 
Note that these +commands must be run in the folder with the `docker-compose.yml`: ```bash -docker-compose restart uwsgi worker nginx -docker-compose stop +$ docker-compose restart uwsgi worker nginx +$ docker-compose stop ``` -When you do `docker-compose up -d` the application should be available at `http://127.0.0.1/`, and if you've configured https, `https://127.0.0.1/`. If you need to shell into the application, for example to debug with `python manage.py shell` you can get the container id with `docker ps` and then do: +When you do `docker-compose up -d` the application should be available at +`http://127.0.0.1/`, and if you've configured https, `https://127.0.0.1/`. +If you need to shell into the application, for example to debug with +`python manage.py shell` you can get the container id with `docker ps` +and then do: ```bash NAME=$(docker ps -aqf "name=sregistry_uwsgi_1") diff --git a/docs/pages/install/server.md b/docs/pages/install/server.md index f0fbb62d..3645c375 100644 --- a/docs/pages/install/server.md +++ b/docs/pages/install/server.md @@ -7,9 +7,11 @@ toc: true --- # Installation: Web Server and Storage + Before doing `docker-compose up -d` to start the containers, there are some specific things that need to be set up. ## Nginx + This section is mostly for your FYI. The nginx container that we use is a custom compiled nginx that includes the [nginx uploads module](https://www.nginx.com/resources/wiki/modules/upload/). This allows us to define a server block that will accept multipart form data directly, and @@ -60,6 +62,7 @@ nginx: the image will be built from the `nginx` folder provided in the repository. ## Under Maintenance Page + If it's ever the case that the Docker images need to be brought down for maintenance, a static fallback page should be available to notify the user. 
If you noticed in the [prepare_instance.sh](https://github.com/singularityhub/sregistry/blob/master/scripts/prepare_instance.sh) script, one of the things we installed is nginx (on the instance). This is because we need to use it to get proper certificates for our domain (for https). Before you do this, you might want to copy the index that we've provided to replace the default (some lame page that says welcome to Nginx!) to one that you can show when the server is undergoing maintainance. ```bash @@ -67,13 +70,34 @@ cp $INSTALL_ROOT/sregistry/scripts/nginx-index.html /var/www/html/index.html rm /var/www/html/index.nginx-debian.html ``` +If you want your page to use the same SSL certificates, a nginx-default.conf is also +provided that will point to the same certificates on the server (generation discussed later): + +```bash +cp $INSTALL_ROOT/sregistry/scripts/nginx-default.conf /etc/nginx/conf.d/default.conf +``` + If you don't care about user experience during updates and server downtime, you can just ignore this. +## Custom Domain + +In the [config settings file](https://github.com/singularityhub/sregistry/blob/master/shub/settings/config.py#L30) +you'll find a section for domain names, and other metadata about your registry. You will need to update +this to be a custom hostname that you use, and custom names and unique resource identifiers for your +registry. For example, if you have a Google Domain and are using Google Cloud, you should be able to set it up using [Cloud DNS](https://console.cloud.google.com/net-services/dns/api/enable?nextPath=%2Fzones&project=singularity-static-registry&authuser=1). Usually this means +creating a zone for your instance, adding a Google Domain, and copying the DNS records for +the domain into Google Domains. Sometimes it can take a few days for changes to propagate. +We will discuss setting up https in a later section. 
+ ## Storage -The containers that you upload to your registry will be stored "inside" the Docker container, specifically at the location `/var/www/images`. By default, we map this location to the host in the base directory of `sregistry` in a folder called `images`. Equally, we map static web files to a folder named `static`. If you look in the [docker-compose.yml](https://github.com/singularityhub/sregistry/blob/master/docker-compose.yml) that looks something like this: +By default, the containers that you upload to your registry will be stored "inside" the Docker container, specifically at the location `/var/www/images`. While it would not be reasonable to upload to Singularity Registry and then to a custom Storage, we have recently added +[custom builders]({{ site.url }}/install-builders) that can be used to push a recipe to Singularity Registry Server, and then trigger a cloud build that will be saved in some matching cloud storage. -``` +If you choose the file system default storage, we map this location to the host in the base directory of `sregistry` in a folder called `images`. Equally, we map static web files to a folder named `static`. If you look in the [docker-compose.yml](https://github.com/singularityhub/sregistry/blob/master/docker-compose.yml) that looks something like this: + + +```yaml - ./static:/var/www/static - ./images:/var/www/images ``` @@ -87,7 +111,17 @@ Thus, you are free to test different configurations of mounting this folder. If ## SSL -Getting https certificates is really annoying, and getting `dhparams.pem` takes forever. But after the domain is obtained, it's important to do. Again, remember that we are working on the host, and we have an nginx server running. You should follow the instructions (and I do this manually) in [generate_cert.sh](../scripts/generate_cert.sh). It basically comes down to: + +Getting https certificates is really annoying, and getting `dhparams.pem` takes forever. 
But after the domain is obtained, it's important to do. Again, remember that we are working on the host, and we have an nginx server running. You should follow the instructions (and I do this manually) in [generate_cert.sh](https://github.com/singularityhub/sregistry/blob/master/scripts/generate_cert.sh). + + - starting nginx + - installing certbot + - generating certificates + - linking them to where the docker-compose expects them + - add a reminder or some other method to renew within 89 days + +With certbot, you should be able to run `certbot renew` when the time to renew comes up. There is also an [older +version](https://github.com/singularityhub/sregistry/blob/master/scripts/generate_cert_tiny-acme.sh) that uses tiny-acme instead of certbot. For this second option, it basically comes down to: - starting nginx - installing tiny acme @@ -96,53 +130,22 @@ Getting https certificates is really annoying, and getting `dhparams.pem` takes - moving them to where they need to be. - add a reminder or some other method to renew within 89 days -Once you have done this, you should use the `docker-compose.yml` and the `nginx.conf` provided in the folder [https](https). So do something like this: +Once you have done this (and you are ready for https), you should use the `docker-compose.yml` and the `nginx.conf` provided in the folder [https](https://github.com/singularityhub/sregistry/blob/master/https/). So do something like this: ```bash mkdir http mv nginx.conf http mv docker-compose.yml http -mv https/docker-compose.yml $PWD -mv https/nginx.conf $PWD +cp https/docker-compose.yml . +cp https/nginx.conf.https nginx.conf ``` -Most importantly, we use a text file to make sure that we generate a single certificate that covers both www* and without. 
This part of the [generate_cert.sh](https://github.com/singularityhub/sregistry/blob/master/scripts/generate_cert.sh) you will need to update the location (town, city, etc) along with your email and the domain you are using: - -```bash -cat > csr_details.txt <<-EOF -[req] -default_bits = 2048 -prompt = no -default_md = sha256 -req_extensions = req_ext -distinguished_name = dn - -[ dn ] -C=US -ST=California -L=San Mateo County -O=End Point -OU=SingularityRegistry -emailAddress=youremail@university.edu -CN = www.domain.edu - -[ req_ext ] -subjectAltName = @alt_names - -[ alt_names ] -DNS.1 = domain.edu -DNS.2 = www.domain.edu -EOF -``` - -Specifically, pay close attention to the fields in the last two sections that need to be customized for the domain and region. - If you run into strange errors regarding any kind of authentication / server / nginx when you start the images, likely it has to do with not having moved these files, or a setting about https in the [settings](https://github.com/singularityhub/sregistry/tree/master/shub/settings). If you have trouble, please post an issue on the [issues board](https://www.github.com/singularityhub/sregistry/issues) and I'd be glad to help. ## Build the Image (Optional) -If you want to try it, you can build the image. Note that this step isn't necessary as the image is provided on [Docker Hub](https://hub.docker.com/r/vanessa/sregistry/). This step is optional - if you want to try building locally, you would do: +If you want to try it, you can build the image. Note that this step isn't necessary as the image is provided on [Docker Hub](https://hub.docker.com/r/vanessa/sregistry/). This step is optional. However, if you are developing you likely want to build the image locally. 
You can do: ```bash diff --git a/docs/pages/install/settings.md b/docs/pages/install/settings.md index 1153fecc..34402521 100644 --- a/docs/pages/install/settings.md +++ b/docs/pages/install/settings.md @@ -33,6 +33,7 @@ SECRET_KEY = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' ``` ### Authentication Secrets + One thing I (@vsoch) can't do for you in advance is produce application keys and secrets to give your Registry for each social provider that you want to allow users (and yourself) to login with. We are going to use a framework called [python social auth](https://python-social-auth-docs.readthedocs.io/en/latest/configuration/django.html) to achieve this, and in fact you can add a [number of providers](http://python-social-auth-docs.readthedocs.io/en/latest/backends/index.html) (I have set up a lot of them, including SAML, so please submit an issue if you want one added to the base proper.). Singularity Registry uses OAuth2 with a token--> refresh flow because it gives the user power to revoke permission at any point, and is a more modern strategy than storing a database of usernames and passwords. You can enable or disable as many of these that you want, and this is done in the [settings/config.py](https://github.com/singularityhub/sregistry/blob/master/shub/settings/config.py): ```python @@ -44,14 +45,19 @@ ENABLE_GITLAB_AUTH=False ENABLE_BITBUCKET_AUTH=False ``` -and you will need at least one to log in. I've found that Github works the fastest and easiest, and then Google. Twitter now requires an actual server name and won't work with localost, but if you are deploying on a server with a proper domain go ahead and use it. All avenues are extremely specific with regard to callback urls, so you should be very careful in setting them up. +and you will need at least one to log in. I've found that Github works the fastest and easiest, and then Google. 
Twitter now requires an actual server name and won't work with localhost, but if you are deploying on a server with a proper domain go ahead and use it. All avenues are extremely specific with regard to callback urls, so you should be very careful in setting them up. If you want automated builds from a repository +integration with Google Cloud Build, then you must use GitHub. -Other authentication methods, such as LDAP, are implemented as [plugins](https://singularityhub.github.io/sregistry/plugins/) to sregistry. See the [plugins documentation](https://singularityhub.github.io/sregistry/plugins/) for details on how to configure these. +#### Plugins +Other authentication methods, such as LDAP, are implemented as [plugins](https://singularityhub.github.io/sregistry/plugins/) to sregistry. See the [plugins documentation](https://singularityhub.github.io/sregistry/plugins/) for details on how to configure these. You should also now look here to see which plugins you will +want to set up (and then build into your container). -We will walk through the setup of each in detail. For all of the below, you should put the content in your `secrets.py` under settings. Note that if you are deploying locally, you will need to put localhost (127.0.0.1) as your domain, and Github is now the only one that worked reliably without an actual domain for me. +For authentication plugins, we will walk through the setup of each in detail here. +For other plugins, you should look at the [plugins](https://singularityhub.github.io/sregistry/plugins/) documentation now before proceeding. For all of the below, you should put the content in your `secrets.py` under settings. Note that if you are deploying locally, you will need to put localhost (127.0.0.1) as your domain, and Github is now the only one that worked reliably without an actual domain for me. 
#### Google OAuth2 + You first need to [follow the instructions](https://developers.google.com/identity/protocols/OpenIDConnect) and setup an OAuth2 API credential. The redirect URL should be every variation of having http/https, and www. and not. (Eg, change around http-->https and with and without www.) of `https://www.sregistry.org/complete/google-oauth2/`. Google has good enough debugging that if you get this wrong, it will give you an error message with what is going wrong. You should store the credential in `secrets.py`, along with the complete path to the file for your application: @@ -153,7 +159,7 @@ the callback url here should be `http://[your-domain]/complete/twitter`. ### Config -In the [config.py](../shub/settings/config.py) you need to define the following: +In the [config.py](https://github.com/singularityhub/sregistry/blob/master/shub/settings/config.py) you need to define the following: #### Domain Name @@ -176,7 +182,7 @@ You need to define a registry uri, and different contact information: ``` HELP_CONTACT_EMAIL = 'vsochat@stanford.edu' -HELP_INSTITUTION_SITE = 'srcc.stanford.edu' +HELP_INSTITUTION_SITE = 'https://srcc.stanford.edu' REGISTRY_NAME = "Tacosaurus Computing Center" REGISTRY_URI = "taco" ``` @@ -194,6 +200,17 @@ USER_COLLECTIONS=True Setting `USER_COLLECTIONS` to False also means that users cannot create [Teams](/sregistry/setup#teams), which are organized groups of users that then can be added as contributors to individual collections. With this setting as True, any authenticated user, staff, or administrator can create and manage new collections and teams, and this is done by issuing a token. +Finally, you can also allow users to create collections, but limit the number created. + +``` +# Limit users to N collections (None is unlimited) +USER_COLLECTION_LIMIT = None +``` + +The default is None, meaning that users can create unlimited collections, given that `USER_COLLECTIONS` +is True. 
If you set this to a non-zero positive integer, user collections will be limited to +this number. If a user is staff or an administrator, they are not subject to this limit. + #### Registry Private By default Singularity Registry will provide public images, with an option to set them to private. If you are working with sensitive data and/or images, you might want all images to be private, with no option to make public. You can control that with the variable `PRIVATE_ONLY`. diff --git a/docs/pages/introduction.md b/docs/pages/introduction.md index 1254813c..04aeff72 100644 --- a/docs/pages/introduction.md +++ b/docs/pages/introduction.md @@ -177,8 +177,9 @@ As was stated in the base [README.md](/sregistry/) The components of the applica - **vanessa/sregistry**: is the main uwsgi application, which serves a Django (python-based) application. - **nginx**: pronounced (engine-X) is the webserver. The starter application is configured for http, however you should follow the instructions to set up https properly. Note that we build a custom nginx image that takes advantage of the [nginx upload module](https://www.nginx.com/resources/wiki/modules/upload/). - - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [celery](http://www.celeryproject.org/), a distributed job queue that fits nicely into Django. The celery worker uses a + - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [django-rq](https://github.com/rq/django-rq) that uses a - **redis**: database to organize the jobs themselves. + - **scheduler** jobs can be scheduled using the scheduler. This means that, given a pretty basic server to run the application, and enough space connected to it to store the images, you can bring the entire thing up relatively quickly. Awesome! Let's get started and talk about first steps of [install](/sregistry/install). 
Or read about [use cases first](/sregistry/use-cases) diff --git a/docs/pages/plugins/README.md b/docs/pages/plugins/README.md index b902e11f..b4dc434e 100644 --- a/docs/pages/plugins/README.md +++ b/docs/pages/plugins/README.md @@ -2,7 +2,7 @@ layout: default title: Plugins pdf: true -permalink: /plugins +permalink: /plugins/ toc: false --- @@ -24,12 +24,12 @@ your registries' local `shub/settings/secrets.py` file. - [PAM-Auth](/sregistry/plugin-pam): authentication using PAM (unix host users) - [Globus](/sregistry/plugin-globus): connect and transfer using Globus - [SAML](/sregistry/plugin-saml): Authentication with SAML + - [Google Build](/sregistry/plugin-google-build) provides build and storage on Google Cloud. ## Writing a Plugin An sregistry plugin is a Django App, that lives inside `shub/plugins/`. - -The plugin interface is currently under development. At present, each plugin: +Each plugin: - Must provide a `urls.py` listing any URLs that will be exposed under `/plugin-name` - Can provide additional, models, views, templates, static files. diff --git a/docs/pages/plugins/google_build/README.md b/docs/pages/plugins/google_build/README.md new file mode 100644 index 00000000..571f8c7d --- /dev/null +++ b/docs/pages/plugins/google_build/README.md @@ -0,0 +1,319 @@ +--- +layout: default +title: "Plugin: Custom Builder and Storage" +pdf: true +permalink: /plugin-google-build +toc: true +--- + +# Plugin: Google Cloud Build and Storage + +The Singularity Registry client allows for [a large set](https://singularityhub.github.io/sregistry-cli/clients) of options for external storage endpoints. 
Specifically, this plugin uses storage and build provided by Google, meaning: + + - [Google Build](https://singularityhub.github.io/sregistry-cli/client-google-build) + - [Google Storage](https://singularityhub.github.io/sregistry-cli/client-google-storage) + +Other cloud vendors have been included with sregistry client (AWS, S3, Minio) and equivalent +build and storage pairs can be added here. If you would like to discuss adding a builder +and storage pair, please [open an issue](https://www.github.com/singularityhub/sregistry). + +Don't forget to go back to the [install docs](https://singularityhub.github.io/sregistry/install-settings) where you left off. This quick start will walk through setting up custom storage using +[Google Cloud Build](https://singularityhub.github.io/sregistry-cli/client-google-build) +and [Google Storage](https://singularityhub.github.io/sregistry-cli/client-google-storage) as +an endpoint. + +## Configure sregistry + +By default, google build is disabled. To configure sregistry to +use Google Cloud build and Storage, in settings/config.py you can enable the plugin by +uncommenting it from the list here: + +```bash +PLUGINS_ENABLED = [ +# 'ldap_auth', +# 'saml_auth', +# 'globus', + 'google_build' +] +``` + +And uncomment installing the google build client in the Dockerfile: + +```bash +# Ensure Google Build Installed +# RUN pip install sregistry[google-build] +``` + +You will need to build the image locally, with other additional +changes (usually plugins) you want enabled: + +```bash +$ docker build -t vanessa/sregistry . +``` + +### Secrets + +Next, set the following variables in `shub/settings/secrets.py`, +that you can create from `dummy_secrets.py` in the shub/settings folder. +The first two speak for themselves, your project name and path to your +Google Application Credentials. 
+ +#### Project Identifiers + +```python +# ============================================================================= +# Google Cloud Build + Storage +# Configure a custom builder and storage endpoint +# ============================================================================= + +# google-storage, s3, google-drive, dropbox +GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json +SREGISTRY_GOOGLE_PROJECT=myproject-ftw + +``` + +You can create custom [Google Application Credentials](https://cloud.google.com/docs/authentication/getting-started) for your server in the browser, and it will be enough to make the service account +a project owner. If you are on a Google Cloud instance you can scp (with gcloud) using the command line as follows: + +```bash +$ gcloud compute scp [credentials].json $USER@[INSTANCE]:/tmp --project [PROJECT] +``` + +Keep in mind that the path to the Google credentials file must be +within the container (/code is the root folder that is bound to the filesystem). + +#### Build Caching + +```python +SREGISTRY_GOOGLE_BUILD_CACHE="true" +``` + +If you set this variable (to anything), it means that after build, you will not +delete intermediate dependencies in cloudbuild bucket (keep them as cache for rebuild if needed). +This defaults to being unset, meaning that files are cleaned up. If you define this as anything, +the build files will be cached. + +#### Build Limit + +```python +SREGISTRY_GOOGLE_BUILD_LIMIT=100 +``` + +To prevent denial of service attacks on Google Cloud Storage, you should +set a reasonable limit for the number of active, concurrent builds. This +number should be based on your expected number of users, repositories, and +recipes per repository. + + +#### Singularity Version + +By default, we use the default version that is set by the [Google Build](https://singularityhub.github.io/sregistry-cli/client-google-build#environment) client that belongs to Singularity Registry Client. 
+However, as this value is subject to be updated, we recommend that you set it in your +secrets and can then decide when you want to update. + +```python +SREGISTRY_GOOGLE_BUILD_SINGULARITY_VERSION="v3.2.1-slim" +``` + +The version must coincide with a container tag hosted under [singularityware/singularity](https://hub.docker.com/r/singularityware/singularity/). + +#### Storage Bucket Name + +By default, the bucket name will be called `sregistry-gcloud-build-[hostname]`, and since +your host is a docker container, that will resolve to a random set of numbers and +letters. For this reason, we *strongly recommend you set a consistent hostname*. +If you do not and need to remove and bring up the containers again, the bucket +metadata will not match the new bucket name. Here is an example of how to set a custom name: + +```python +SREGISTRY_GOOGLE_STORAGE_BUCKET="taco-singularity-registry" +``` + +Additionally, a temporary bucket is created with the same name ending in `_cloudbuild`. This bucket is for build time dependencies, and is cleaned up after the fact. If you are having trouble getting a bucket it is likely because the name is taken, +and we recommend creating both `[name]` and `[name]_cloudbuild` in the console and then setting the name here. + +#### Build Timeout + +The number of seconds for the build to timeout. If set to None, will be 10 minutes. If +unset, it will default to 3 hours. This time should be less than the `SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS`. If +you want to use the default, don't define this variable in your secrets. + +```python +# SREGISTRY_GOOGLE_BUILD_TIMEOUT_SECONDS=None +``` + + +#### Build Expiration + +You must define the number of seconds that your build expires in, meaning that it would no +longer be accepted by the server. + +```python +SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS=28800 +``` + +The default provided in the dummy secrets, shown above, would indicate 8 hours. 
+ +#### Private Containers + +By default, images that you upload will be made public, meaning that a user that stumbles on the URL (or has permission to read your bucket otherwise) will be able to see and download them. If you want to make images globally private you should export this variable as some derivative of yes/true. If no variable is found, images are made public by default. + +```python +SREGISTRY_GOOGLE_STORAGE_PRIVATE=True +``` + +These variables are written in detail in the dummy_secrets.py file. +If you need more information, you can read [the Google Cloud Build page](https://singularityhub.github.io/sregistry-cli/client-google-build). + +If you are missing some variable, there will be an error message +on interaction with the Google Cloud Build API since you won't be able to +authenticate. Once your settings are ready to go, you will want to continue +with the [install docs](https://singularityhub.github.io/sregistry/install-server#storage) where you left off, +and you can continue here after you've done: + +``` +$ docker-compose up -d +``` + +and confirmed the registry running at localhost, and also have logged in +(so you have an account with permission to push containers and recipes.) + +## Singularity Registry Client + +If you haven't yet, you will need the [sregistry client](https://singularityhub.github.io/sregistry-cli/) in order to push recipes to build with Google Cloud Build. The minimum version that supports this +is `0.2.19`. An easy way to install is any of the following: + +```bash +$ pip install sregistry[google-build] +$ pip install sregistry[google-build-basic] # without local sqlite database +``` + +Next, export the client to be your registry. + +``` +$ export SREGISTRY_CLIENT=registry +``` + +If you are reading here from the installation docs, you likely haven't +brought up your registry and should [return there](https://singularityhub.github.io/sregistry/install-settings) where you left off. 
+ +## Building Containers + +There are two ways to trigger builds: + + 1. Automated trigger from GitHub webhooks + 2. Manual push of a recipe + +The recommended approach is to enable GitHub authentication and then +have pushes to your repository trigger builds. For the second approach, +while you can upload a recipe directly, it is not recommended +as it doesn't have the recipe kept under any version control. + +### Trigger from Github + +You will first need to log in with GitHub, and then navigate to the +container collections page (the "Containers" link in the navigation): + +![/sregistry/assets/img/google-build-new-collection.png](assets/img/google-build-new-collection.png) + +If the Google Build plugin is correctly enabled, you'll see a second option on the +right: + +![/sregistry/assets/img/google-build-connect-github.png](assets/img/google-build-connect-github.png) + +Select this, and your repositories (and organizations) that you granted +permission to connect to will show up. You can select one: + +![/sregistry/assets/img/google-build-repos.png](assets/img/google-build-repos.png) + +Once you've connected the repository, an initial build will build +the latest version of the recipes that are discovered. Any recipe that +is in the format `Singularity.` or just `Singularity` (tag defaults +to latest) will be built. + +![assets/img/google-build-collection.png](assets/img/google-build-collection.png) + +If you have two recipes named equivalently in different folders, the +recipe that was more recently updated will be used. + +### Push a Recipe + +When the server is started and the client is ready, it's time to push a recipe +to build! By default, you will need to specify the name of the collection and +container, and to include the fact that you want to use Google Cloud Build. 
+You'll need to install Singularity Registry Client version 0.2.21 or later: + +```bash +$ pip install "sregistry[google-build]>=0.2.21" +$ pip install "sregistry[google-build-basic]>=0.2.21" # without local database +``` + +Then to submit a build, you'll need to grab your credentials from https://<your-registry>/token. +You can write them to your Singularity Registry secrets at `$HOME/.sregistry`. Once your +token and registry base are defined, you will need to create the collection +in the web interface first to establish yourself as an owner. **You cannot +push to a collection that does not exist**. Once the collection is +created (for example, below I created the collection "collection"), you can push like this: + +```bash +$ sregistry build --name registry://collection/container:tag Singularity --builder google_build +``` + +Notice that we specify the builder to be "google_build." Also notice +that the command simply requires a name for your collection (it must +already exist, and you need push access and to have [exported your token](https://singularityhub.github.io/sregistry/credentials) to your local machine). + +If you get this error: + +```bash +[================================] 0/0 MB - 00:00:00 +Recipe upload failed: 403 Client Error: Forbidden for url: https://containers.page/google_build/build/. +``` + +you forgot to create a collection called "collection" and need to make it in the interface before +proceeding. + +## Pull Containers + +Once you have a container, you of course want to pull it! You can use +the Singularity Client to do this. Let's say that our server is at `https://www.containers.page`: + +```bash +$ singularity pull shub://containers.page/singularityhub/hello-registry:latest + 760.00 KiB / 760.00 KiB [=========================================================================================] 100.00% 5.22 MiB/s 0s +``` + +And there you have it! + +```bash +$ ls +hello-registry_latest.sif + +$ singularity run hello-registry_latest.sif +Tacotacotaco!
+``` + +Note that having a custom registry name (containers.page, in the above example) +exposed a bug in early versions of Singularity 3.x. If you have trouble with +this command, you will need to upgrade Singularity. + +You can technically also just pull it with simple bash commands, if you +don't want to rely on Singularity. + +```bash +$ wget $(curl https://containers.page/api/container/singularityhub/hello-registry:latest | jq --raw-output .image) +``` + +If you want to pull with Singularity (but get the error) you can also do this: + +```bash +$ singularity pull $(curl https://containers.page/api/container/singularityhub/hello-registry:latest | jq --raw-output .image) +``` + +Finally, it should be pointed out that you can use the Google Builder integration +from your command line without having a registry at all. [Singularity Registry Client](https://singularityhub.github.io/sregistry-cli/client-google-build) can serve to build and then pull the image on its own. +
+ + +

diff --git a/docs/pages/plugins/ldap/README.md b/docs/pages/plugins/ldap/README.md index 098829e0..329e6c24 100644 --- a/docs/pages/plugins/ldap/README.md +++ b/docs/pages/plugins/ldap/README.md @@ -34,7 +34,7 @@ with unencrypted, StartTLS, and SSL access to an OpenLDAP directory. ## Quick Start This quick start is intended to demonstrate basic functionality of the LDAP server, and you should -review the links referenced above for more detail. After you've completed basic setup in +review the links referenced above for more detail. ### What is LDAP? diff --git a/docs/pages/setup/register.md b/docs/pages/setup/register.md index bcf08212..70f33485 100644 --- a/docs/pages/setup/register.md +++ b/docs/pages/setup/register.md @@ -90,7 +90,11 @@ thumb: custom/taco-logo.png You can then add your files, and submit a PR to the main repo. We will have tests that ping your registry to ensure correct naming of files and registry address, along with a preview of the content that is added. If you want to preview locally, you can run `jekyll serve`. -Great! Now that you have your accounts, you probably want to learn about how to build and push images! First you need to generate a [credential](/sregistry/credentials), and then you will can read about the [client](/sregistry/client). +Great! Now that you have your accounts, you probably want to learn about how to build and push images! +To push directly, you will first need to generate a [credential](/sregistry/credentials). If you +have enabled the [Google Build+Github]({{ site.baseurl }}/plugin-google-build) plugin, +then you will be able to log in with GitHub, and connect GitHub repositories to build +on commit. Either way, you should next read about the [client](/sregistry/client).
diff --git a/docs/pages/setup/roles.md b/docs/pages/setup/roles.md index 4ff6b8a0..2a0dd1dd 100644 --- a/docs/pages/setup/roles.md +++ b/docs/pages/setup/roles.md @@ -30,7 +30,15 @@ is an anonymous user of the registry. In the case of a private registry, this in Based on the above and granted that you are setting up the server and reading this, you will be a **superuser** because you have permissions to control the Docker images and grant other users (and yourself) the ability to push with the role **admin**. -Next, learn how users can be a part of [teams](/sregistry/setup-teams) +# Google Build + GitHub + +If you have enabled the [Google Build+Github]({{ site.baseurl }}/plugin-google-build) plugin, +then your users will be able to log in with GitHub, and build collections that are +linked to GitHub repositories. In this case, permissions for the registry interaction +do not extend to GitHub. For example, if you build from a repository that you own, +adding a collaborator or another owner will not change anything on GitHub. + +Speaking of collaborators, next, learn how users can be a part of [teams](/sregistry/setup-teams)
diff --git a/docs/pages/setup/teams.md b/docs/pages/setup/teams.md index 20cf8845..4757fd02 100644 --- a/docs/pages/setup/teams.md +++ b/docs/pages/setup/teams.md @@ -7,6 +7,7 @@ toc: false --- # Teams + To add a level of organization of users, sregistry has created loose groups of users called Teams. A registry admin can create a team, or if `USER_COLLECTIONS` is True, an authenticated user can also create them. Creating a team means that the creator (admin or authenticated user) becomes the Owner of the team that can add and remove users. If an admin creates a team for a group of users, he or she must manage it or add a user to the list of owners to do the same. To create a team: 1. Click on the "teams" tab in the navigation bar diff --git a/docs/pages/use-cases.md b/docs/pages/use-cases.md index 45ad51f1..006df77d 100644 --- a/docs/pages/use-cases.md +++ b/docs/pages/use-cases.md @@ -17,7 +17,8 @@ In this use case, I am an individual user, or share a computer resource with a s My university runs a shared computational resource that manages a registry on a server next to it. Akin to supplying software modules, the administrators keep a version controlled repo of build recipes, and when software needs to be updated, create a new image with a tag for the version. The users can then use the images by way of specifying the unique resource identifier. ## Collaborative Cluster Registry -It's often the case that pipelines are maintained internally within labs, or eventually discarded after papers are published and graduate students finish. In this use case, a large cluster wants to provide a central, organized resource for the scientific containers generated by its researchers. Perhaps alongside or instead of the core software and tools, this cluster decides to build final or published containers for its users. Building might lead to a private image for use at the institution, or a public image that can be referenced in a publication and easily disseminated.
To build, the researcher simply might submit a pull request to a Github repo associated with the registry, it can be built and tested and discussed, and when ready, pushed to the resource from the continuous integration, or by the cluster's particular build server. Either way, the final upload is an authenticated, single line call to push the image with an appropriate name and tag. +It's often the case that pipelines are maintained internally within labs, or eventually discarded after papers are published and graduate students finish. In this use case, a large cluster wants to provide a central, organized resource for the scientific containers generated by its researchers. Perhaps alongside or instead of the core software and tools, this cluster decides to build final or published containers for its users. Building might lead to a private image for use at the institution, or a public image that can be referenced in a publication and easily disseminated. To build, the researcher simply might submit a pull request to a Github repo associated with the registry, it can be built and tested and discussed, and when ready, pushed to the resource from the continuous integration, or by the cluster's particular build server. Either way, the final upload is an authenticated, single line call to push the image with an appropriate name and tag. If you +add [plugins](/sregistry-cli/plugins) you can also have custom authentication and builds (e.g., GitHub webhooks + Google Cloud Build). If you are a single user and looking for an image management tool, perhaps to work with images in multiple locations beyond a Singularity Registry, Server then you will be interested in the [Singularity Global Client](https://singularityhub.github.io/sregistry-cli). 
diff --git a/https/docker-compose.yml b/https/docker-compose.yml index d268dbaa..b9f1c26c 100644 --- a/https/docker-compose.yml +++ b/https/docker-compose.yml @@ -8,6 +8,9 @@ uwsgi: - .:/code - ./static:/var/www/static - ./images:/var/www/images + # uncomment for PAM auth + #- /etc/passwd:/etc/passwd + #- /etc/shadow:/etc/shadow links: - redis - db @@ -32,13 +35,21 @@ nginx: redis: restart: always image: redis:latest - ports: - - "6379:6379" +scheduler: + image: vanessa/sregistry + command: python /code/manage.py rqscheduler + volumes: + - .:/code + volumes_from: + - uwsgi + links: + - redis + - db worker: image: vanessa/sregistry - command: celery worker -A shub.celery -Q default -n default@%h -B + command: python /code/manage.py rqworker default volumes: - .:/code volumes_from: diff --git a/requirements.txt b/requirements.txt index f6ddf91b..82734c54 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,41 +1,44 @@ +anyjson +coreapi==2.3.3 cython -h5py -uwsgi -django>=1.11.18 -social-auth-app-django -social-auth-core[saml] -python-social-auth -djangorestframework -markdown -django-filter +django-chosen +django-crispy-forms +django-datatables-view +django-dirtyfields django-extensions -django-taggit +django-filter django-form-utils -django-crispy-forms +django-gravatar2 +django-guardian +django-hstore==1.3.5 django-notifications-hq +django-rest-swagger +django-rq +django-taggit django-taggit-templatetags -django-dirtyfields -psycopg2-binary +django-user-agents +django>=1.11.18 +djangorestframework +google +google-api-python-client +h5py +ipython +markdown numexpr -shapely +oauth2client==3.0 Pillow +PyYAML==5.1 +psycopg2-binary +pygments +python3-saml +python-social-auth requests requests-oauthlib requests-toolbelt -celery[redis]<4 -django-celery -django-chosen -opbeat -django-hstore==1.3.5 -django-datatables-view -sregistry==0.0.97 -django-gravatar2 -pygments -google-api-python-client -google -oauth2client==3.0 retrying -django-rest-swagger 
-django-user-agents -django-guardian -python3-saml +rq-scheduler +shapely +social-auth-app-django +social-auth-core[saml] +sregistry[all-basic]>=0.2.19 +uwsgi diff --git a/run_uwsgi.sh b/run_uwsgi.sh index e7b2baa3..dd48ca6c 100755 --- a/run_uwsgi.sh +++ b/run_uwsgi.sh @@ -2,14 +2,11 @@ python manage.py makemigrations python manage.py migrate auth -python manage.py makemigrations users -python manage.py makemigrations main -python manage.py makemigrations api -python manage.py makemigrations logs python manage.py migrate python manage.py collectstatic --noinput service cron start + if grep -Fxq "PLUGINS_ENABLED+=[\"globus\"]" /code/shub/settings/config.py then # When configured, we can start the endpoint diff --git a/scripts/generate_cert.sh b/scripts/generate_cert.sh old mode 100755 new mode 100644 index f4b6e721..ffce55b8 --- a/scripts/generate_cert.sh +++ b/scripts/generate_cert.sh @@ -4,45 +4,54 @@ # # -INSTALL_ROOT=${1} -EMAIL=${2} +EMAIL=${1} DOMAIN=${2} -STATE=${3:-California} -COUNTY=${4:-San Mateo County} - -sudo mkdir /opt/acme_tiny -cd /tmp && git clone https://github.com/diafygi/acme-tiny -sudo mv acme-tiny /opt/acme-tiny/ -sudo chown $USER -R /opt/acme-tiny - -# Create a directory for the keys and cert -cd $INSTALL_ROOT/sregistry - -# If you started the images, stop nginx -docker-compose stop nginx -sudo service nginx start - -# backup old key and cert -if [ -f "/etc/ssl/private/domain.key" ] - then - sudo cp /etc/ssl/private/domain.key{,.bak.$(date +%s)} -fi - -if [ -f "/etc/ssl/certs/chained.pem" ] - then - sudo cp /etc/ssl/certs/chained.pem{,.bak.$(date +%s)} -fi - -if [ -f "/etc/ssl/certs/domain.csr" ] - then - sudo cp /etc/ssl/certs/domain.csr{,.bak.$(date +%s)} -fi - -# Generate a private account key, if doesn't exist -if [ ! 
-f "/etc/ssl/certs/account.key" ] - then - openssl genrsa 4096 > account.key && sudo mv account.key /etc/ssl/certs -fi +INSTALL_ROOT=$HOME + +# Install certbot (if not already done) +sudo add-apt-repository ppa:certbot/certbot +sudo apt-get update +sudo apt-get install python-certbot-nginx + +# Get certificates (might need sudo) +certbot certonly --nginx -d "${DOMAIN}" -d "www.${DOMAIN}" --email "${EMAIL}" --agree-tos --redirect + +# The prompt is interactive, and will show the locations of certificates + +# Obtaining a new certificate +# Performing the following challenges: +# http-01 challenge for containers.page +# http-01 challenge for www.containers.page +# Waiting for verification... +# Cleaning up challenges + +# IMPORTANT NOTES: +# - Congratulations! Your certificate and chain have been saved at: +# /etc/letsencrypt/live/containers.page/fullchain.pem +# Your key file has been saved at: +# /etc/letsencrypt/live/containers.page/privkey.pem +# Your cert will expire on 2019-09-04. To obtain a new or tweaked +# version of this certificate in the future, simply run certbot +# again. To non-interactively renew *all* of your certificates, run +# "certbot renew" +# - Your account credentials have been saved in your Certbot +# configuration directory at /etc/letsencrypt. You should make a +# secure backup of this folder now. This configuration directory will +# also contain certificates and private keys obtained by Certbot so +# making regular backups of this folder is ideal. +# - If you like Certbot, please consider supporting our work by: + +# Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate +# Donating to EFF: https://eff.org/donate-le + +# Since the containers expect these files to be in /etc/ssl, copy there +# This CANNOT be a link. 
+sudo cp /etc/letsencrypt/live/containers.page/fullchain.pem /etc/ssl/certs/chained.pem +sudo cp /etc/letsencrypt/live/containers.page/privkey.pem /etc/ssl/private/domain.key + +# Create recursive backup +backup=$(echo /etc/letsencrypt{,.bak.$(date +%s)} | cut -d ' ' -f 2) +sudo cp -R /etc/letsencrypt $backup # Add extra security if [ ! -f "/etc/ssl/certs/dhparam.pem" ] @@ -50,57 +59,6 @@ if [ ! -f "/etc/ssl/certs/dhparam.pem" ] openssl dhparam -out dhparam.pem 4096 && sudo mv dhparam.pem /etc/ssl/certs fi -if [ ! -f "csr_details.txt" ] -then - -cat > csr_details.txt <<-EOF -[req] -default_bits = 2048 -prompt = no -default_md = sha256 -req_extensions = req_ext -distinguished_name = dn - -[ dn ] -C=US -ST=$STATE -L=$COUNTY -O=End Point -OU=$DOMAIN -emailAddress=$EMAIL -CN = www.$DOMAIN - -[ req_ext ] -subjectAltName = @alt_names - -[ alt_names ] -DNS.1 = $DOMAIN -DNS.2 = www.$DOMAIN -EOF - -fi - -# Call openssl -openssl req -new -sha256 -nodes -out domain.csr -newkey rsa:2048 -keyout domain.key -config <( cat csr_details.txt ) - -# Create a CSR for $DOMAIN -#sudo openssl req -new -sha256 -key /etc/ssl/private/domain.key -subj "/CN=$DOMAIN" > domain.csr -sudo mv domain.csr /etc/ssl/certs/domain.csr -sudo mv domain.key /etc/ssl/private/domain.key - -# Create the challenge folder in the webroot -sudo mkdir -p /var/www/html/.well-known/acme-challenge/ -sudo chown $USER -R /var/www/html/ - -# Get a signed certificate with acme-tiny -#docker-compose stop nginx -python /opt/acme-tiny/acme_tiny.py --account-key /etc/ssl/certs/account.key --csr /etc/ssl/certs/domain.csr --acme-dir /var/www/html/.well-known/acme-challenge/ > ./signed.crt - -wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > intermediate.pem -cat signed.crt intermediate.pem > chained.pem -sudo mv chained.pem /etc/ssl/certs/ -rm signed.crt intermediate.pem - # Stop nginx sudo service nginx stop diff --git a/scripts/generate_certs.sh b/scripts/generate_certs.sh new file mode 100755 index 
00000000..f4b6e721 --- /dev/null +++ b/scripts/generate_certs.sh @@ -0,0 +1,108 @@ +#! /bin/bash +# +# nginx should be installed on the host machine +# +# + +INSTALL_ROOT=${1} +EMAIL=${2} +DOMAIN=${2} +STATE=${3:-California} +COUNTY=${4:-San Mateo County} + +sudo mkdir /opt/acme_tiny +cd /tmp && git clone https://github.com/diafygi/acme-tiny +sudo mv acme-tiny /opt/acme-tiny/ +sudo chown $USER -R /opt/acme-tiny + +# Create a directory for the keys and cert +cd $INSTALL_ROOT/sregistry + +# If you started the images, stop nginx +docker-compose stop nginx +sudo service nginx start + +# backup old key and cert +if [ -f "/etc/ssl/private/domain.key" ] + then + sudo cp /etc/ssl/private/domain.key{,.bak.$(date +%s)} +fi + +if [ -f "/etc/ssl/certs/chained.pem" ] + then + sudo cp /etc/ssl/certs/chained.pem{,.bak.$(date +%s)} +fi + +if [ -f "/etc/ssl/certs/domain.csr" ] + then + sudo cp /etc/ssl/certs/domain.csr{,.bak.$(date +%s)} +fi + +# Generate a private account key, if doesn't exist +if [ ! -f "/etc/ssl/certs/account.key" ] + then + openssl genrsa 4096 > account.key && sudo mv account.key /etc/ssl/certs +fi + +# Add extra security +if [ ! -f "/etc/ssl/certs/dhparam.pem" ] + then + openssl dhparam -out dhparam.pem 4096 && sudo mv dhparam.pem /etc/ssl/certs +fi + +if [ ! 
-f "csr_details.txt" ] +then + +cat > csr_details.txt <<-EOF +[req] +default_bits = 2048 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[ dn ] +C=US +ST=$STATE +L=$COUNTY +O=End Point +OU=$DOMAIN +emailAddress=$EMAIL +CN = www.$DOMAIN + +[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = $DOMAIN +DNS.2 = www.$DOMAIN +EOF + +fi + +# Call openssl +openssl req -new -sha256 -nodes -out domain.csr -newkey rsa:2048 -keyout domain.key -config <( cat csr_details.txt ) + +# Create a CSR for $DOMAIN +#sudo openssl req -new -sha256 -key /etc/ssl/private/domain.key -subj "/CN=$DOMAIN" > domain.csr +sudo mv domain.csr /etc/ssl/certs/domain.csr +sudo mv domain.key /etc/ssl/private/domain.key + +# Create the challenge folder in the webroot +sudo mkdir -p /var/www/html/.well-known/acme-challenge/ +sudo chown $USER -R /var/www/html/ + +# Get a signed certificate with acme-tiny +#docker-compose stop nginx +python /opt/acme-tiny/acme_tiny.py --account-key /etc/ssl/certs/account.key --csr /etc/ssl/certs/domain.csr --acme-dir /var/www/html/.well-known/acme-challenge/ > ./signed.crt + +wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > intermediate.pem +cat signed.crt intermediate.pem > chained.pem +sudo mv chained.pem /etc/ssl/certs/ +rm signed.crt intermediate.pem + +# Stop nginx +sudo service nginx stop + +cd $INSTALL_ROOT/sregistry +docker-compose up -d diff --git a/scripts/generate_certs_tiny-acme.sh b/scripts/generate_certs_tiny-acme.sh new file mode 100755 index 00000000..f4b6e721 --- /dev/null +++ b/scripts/generate_certs_tiny-acme.sh @@ -0,0 +1,108 @@ +#! 
/bin/bash +# +# nginx should be installed on the host machine +# +# + +INSTALL_ROOT=${1} +EMAIL=${2} +DOMAIN=${2} +STATE=${3:-California} +COUNTY=${4:-San Mateo County} + +sudo mkdir /opt/acme_tiny +cd /tmp && git clone https://github.com/diafygi/acme-tiny +sudo mv acme-tiny /opt/acme-tiny/ +sudo chown $USER -R /opt/acme-tiny + +# Create a directory for the keys and cert +cd $INSTALL_ROOT/sregistry + +# If you started the images, stop nginx +docker-compose stop nginx +sudo service nginx start + +# backup old key and cert +if [ -f "/etc/ssl/private/domain.key" ] + then + sudo cp /etc/ssl/private/domain.key{,.bak.$(date +%s)} +fi + +if [ -f "/etc/ssl/certs/chained.pem" ] + then + sudo cp /etc/ssl/certs/chained.pem{,.bak.$(date +%s)} +fi + +if [ -f "/etc/ssl/certs/domain.csr" ] + then + sudo cp /etc/ssl/certs/domain.csr{,.bak.$(date +%s)} +fi + +# Generate a private account key, if doesn't exist +if [ ! -f "/etc/ssl/certs/account.key" ] + then + openssl genrsa 4096 > account.key && sudo mv account.key /etc/ssl/certs +fi + +# Add extra security +if [ ! -f "/etc/ssl/certs/dhparam.pem" ] + then + openssl dhparam -out dhparam.pem 4096 && sudo mv dhparam.pem /etc/ssl/certs +fi + +if [ ! 
-f "csr_details.txt" ] +then + +cat > csr_details.txt <<-EOF +[req] +default_bits = 2048 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[ dn ] +C=US +ST=$STATE +L=$COUNTY +O=End Point +OU=$DOMAIN +emailAddress=$EMAIL +CN = www.$DOMAIN + +[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = $DOMAIN +DNS.2 = www.$DOMAIN +EOF + +fi + +# Call openssl +openssl req -new -sha256 -nodes -out domain.csr -newkey rsa:2048 -keyout domain.key -config <( cat csr_details.txt ) + +# Create a CSR for $DOMAIN +#sudo openssl req -new -sha256 -key /etc/ssl/private/domain.key -subj "/CN=$DOMAIN" > domain.csr +sudo mv domain.csr /etc/ssl/certs/domain.csr +sudo mv domain.key /etc/ssl/private/domain.key + +# Create the challenge folder in the webroot +sudo mkdir -p /var/www/html/.well-known/acme-challenge/ +sudo chown $USER -R /var/www/html/ + +# Get a signed certificate with acme-tiny +#docker-compose stop nginx +python /opt/acme-tiny/acme_tiny.py --account-key /etc/ssl/certs/account.key --csr /etc/ssl/certs/domain.csr --acme-dir /var/www/html/.well-known/acme-challenge/ > ./signed.crt + +wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > intermediate.pem +cat signed.crt intermediate.pem > chained.pem +sudo mv chained.pem /etc/ssl/certs/ +rm signed.crt intermediate.pem + +# Stop nginx +sudo service nginx stop + +cd $INSTALL_ROOT/sregistry +docker-compose up -d diff --git a/scripts/globus/robotnamer.py b/scripts/globus/robotnamer.py index 21523cc9..375cd4ef 100755 --- a/scripts/globus/robotnamer.py +++ b/scripts/globus/robotnamer.py @@ -1,7 +1,6 @@ #!/usr/bin/env python ''' - Copyright (C) 2019 Vanessa Sochat. This Source Code Form is subject to the terms of the @@ -48,15 +47,14 @@ class RobotNamer: def generate(self, delim='-', length=4, chars='0123456789'): - ''' - Generate a robot name. Inspiration from Haikunator, but much more + '''Generate a robot name. 
Inspiration from Haikunator, but much more poorly implemented ;) - Parameters - ========== - delim: Delimiter - length: TokenLength - chars: TokenChars + Parameters + ========== + delim: Delimiter + length: TokenLength + chars: TokenChars ''' descriptor = self._select(self._descriptors) diff --git a/scripts/nginx-default.conf b/scripts/nginx-default.conf new file mode 100644 index 00000000..b030bff0 --- /dev/null +++ b/scripts/nginx-default.conf @@ -0,0 +1,49 @@ +# This is provided for the instance in case https is needed outside of the +# docker-compose. You can move this file to /etc/nginx/conf.d/default.conf +# and then bring up nginx (sudo service nginx start) + +server { + listen *:80; + server_name localhost; + + client_max_body_size 8000M; + client_body_buffer_size 8000M; + client_body_timeout 120; + + add_header X-Clacks-Overhead "GNU Terry Pratchett"; + add_header X-Clacks-Overhead "GNU Terry Pratchett"; + add_header Access-Control-Allow-Origin *; + add_header 'Access-Control-Allow-Credentials' 'true'; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'Authorization,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'; + + root /var/www/html; + index index.html; + try_files $uri /index.html; +} + +server { + + listen 443; + server_name localhost; + + client_max_body_size 2000M; + client_body_buffer_size 2000M; + + ssl on; + #ssl_certificate /etc/letsencrypt/live/singularity-hub.org/fullchain.pem; + ssl_certificate /etc/ssl/certs/chained.pem; + ssl_certificate_key /etc/ssl/private/domain.key; + #ssl_certificate_key /etc/letsencrypt/live/singularity-hub.org/privkey.pem; + ssl_session_timeout 5m; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers
ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA; + ssl_session_cache shared:SSL:50m; + ssl_dhparam /etc/ssl/certs/dhparam.pem; + ssl_prefer_server_ciphers on; + + root /var/www/html; + index index.html; + try_files $uri /index.html; +} + diff --git a/scripts/prepare_instance.sh b/scripts/prepare_instance.sh index 7f71b84f..6e53294d 100644 --- a/scripts/prepare_instance.sh +++ b/scripts/prepare_instance.sh @@ -3,6 +3,9 @@ # Change this to where you want to install. $HOME # is probably a bad choice if it needs to be maintained # by a group of people + +# This was developed on Ubuntu 18.04 LTS on Google Cloud + INSTALL_ROOT=$HOME # Prepare instance (or machine) with Docker, docker-compose, python @@ -20,31 +23,40 @@ sudo pip install ipaddress sudo pip install oauth2client -# Python 3 -wget https://repo.continuum.io/archive/Anaconda3-4.2.0-Linux-x86_64.sh -bash Anaconda3-4.2.0-Linux-x86_64.sh -b - -# You might already have anaconda installed somewhere -PATH=$HOME/anaconda3/bin:$PATH -rm Anaconda3-4.2.0-Linux-x86_64.sh -export PATH +# Install Docker dependencies +sudo apt-get install -y \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg-agent \ + software-properties-common # Add docker key server -sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +# OK + +# $ sudo apt-key fingerprint 0EBFCD88 +# pub rsa4096 2017-02-22 [SCEA] +# 9DC8 5822 9FC7 DD38 854A E2D8 8D81 803C 0EBF CD88 +# uid [ unknown] Docker Release (CE deb) +# sub rsa4096 2017-02-22 [S] + +# Add stable repository + sudo add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" # Install Docker!
sudo apt-get update && -sudo apt-get install apt-transport-https ca-certificates && -sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D -echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" | sudo tee --append /etc/apt/sources.list.d/docker.list -sudo apt-get update && -apt-cache policy docker-engine -sudo apt-get update && -sudo apt-get -y install linux-image-extra-$(uname -r) linux-image-extra-virtual && -sudo apt-get -y install docker-engine && -sudo service docker start +sudo apt-get install -y docker-ce docker-ce-cli containerd.io + +# test, you will still need sudo +sudo docker run hello-world + +# Docker group should already exist +# sudo groupadd docker -#sudo docker run hello-world #make sure to add all users that will maintain / use the registry sudo usermod -aG docker $USER @@ -53,11 +65,16 @@ sudo apt -y install docker-compose # Note that you will need to log in and out for changes to take effect -if [ ! -d $INSTALL_ROOT/singularity-registry ] -then - cd $INSTALL_ROOT - git clone https://www.github.com/singularityhub/sregistry.git - cd sregistry - docker build -t vanessa/sregistry . - docker-compose up -d +if [ ! -d $INSTALL_ROOT/sregistry ]; then + cd $INSTALL_ROOT + + # production + # git clone https://www.github.com/singularityhub/sregistry.git + + # development + git clone -b add/builders https://www.github.com/singularityhub/sregistry.git + + cd sregistry + docker build -t vanessa/sregistry . 
+ docker-compose up -d fi diff --git a/shub/__init__.py b/shub/__init__.py index e09a930d..e69de29b 100644 --- a/shub/__init__.py +++ b/shub/__init__.py @@ -1,4 +0,0 @@ -from __future__ import absolute_import, unicode_literals -from .celery import shubcelery as celery_app - -__all__ = ['celery_app'] diff --git a/shub/apps/api/actions/create.py b/shub/apps/api/actions/create.py index b950935a..82a15d76 100644 --- a/shub/apps/api/actions/create.py +++ b/shub/apps/api/actions/create.py @@ -9,13 +9,11 @@ ''' from shub.settings import MEDIA_ROOT -from sregistry.utils import parse_image_name from shub.logger import bot +from sregistry.utils import parse_image_name from django.db import IntegrityError -from django.db.utils import DataError +import django_rq import shutil -import uuid -import json import os def move_upload_to_storage(collection, upload_id): @@ -36,7 +34,7 @@ def move_upload_to_storage(collection, upload_id): # Rename the file, moving from ImageUpload to Storage filename = os.path.basename(instance.file.path) - new_path = os.path.join(image_home, filename.replace('.part', '.simg')) + new_path = os.path.join(image_home, filename.replace('.part', '.sif')) shutil.move(instance.file.path, new_path) print('%s --> %s' %(instance.file.path, new_path)) instance.file.name = new_path @@ -52,7 +50,7 @@ def generate_nginx_storage_path(collection, source, dest): source: the source file (under /var/www/images/_upload/{0-9} dest: the destination filename ''' - image_home = "%s/%s" %(MEDIA_ROOT, collection.name) + image_home = os.path.join(MEDIA_ROOT, collection.name) return os.path.join(image_home, os.path.basename(dest)) @@ -68,7 +66,7 @@ def move_nginx_upload_to_storage(collection, source, dest): dest: the destination filename ''' # Create collection root, if it doesn't exist - image_home = "%s/%s" %(MEDIA_ROOT, collection.name) + image_home = os.path.join(MEDIA_ROOT, collection.name) if not os.path.exists(image_home): os.mkdir(image_home) @@ -76,6 +74,19 @@ def 
move_nginx_upload_to_storage(collection, source, dest): shutil.move(source, new_path) return new_path +def calculate_version(cid): + '''calculate version is run as a separate task after a container upload. + Instead of using md5 provided by nginx we calculate sha256 sum and + then include as the version variable. + ''' + from shub.apps.main.views import get_container + from sregistry.utils import get_file_hash + print("Calculating version for upload.") + container = get_container(cid) + version = "sha256.%s" % get_file_hash(container.image.datafile.path, "sha256") + container.version = version + container.save() + def upload_container(cid, user, name, version, upload_id, size=None): '''save an uploaded container, usually coming from an ImageUpload @@ -97,9 +108,8 @@ def upload_container(cid, user, name, version, upload_id, size=None): error / success codes. ''' - from shub.apps.main.models import ( Container, Collection ) - from shub.apps.api.models import ( ImageUpload, ImageFile ) - from shub.apps.main.views import update_container_labels + from shub.apps.main.models import (Container, Collection) + from shub.apps.api.models import (ImageUpload, ImageFile) collection = Collection.objects.get(id=cid) # Only continue if user is an owner @@ -142,7 +152,7 @@ def upload_container(cid, user, name, version, upload_id, size=None): # If one exists, we check if it's frozen create_new = True - if len(containers) > 0: + if containers: # If we already have a container, it might be frozen container = containers[0] @@ -186,6 +196,9 @@ def upload_container(cid, user, name, version, upload_id, size=None): # Once the container is saved, delete the intermediate file object delete_file_instance(instance) + # Run a task to calculate the sha256 sum + django_rq.enqueue(calculate_version, cid=container.id) + def delete_file_instance(instance): '''a helper function to remove the file assocation, and delete the instance diff --git a/shub/apps/api/actions/delete.py 
b/shub/apps/api/actions/delete.py index d9135b30..87ad7a67 100644 --- a/shub/apps/api/actions/delete.py +++ b/shub/apps/api/actions/delete.py @@ -1,6 +1,6 @@ ''' -Copyright (C) 2017-2018 Vanessa Sochat. +Copyright (C) 2017-2019 Vanessa Sochat. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed @@ -9,14 +9,13 @@ ''' from shub.logger import bot -from sregistry.main.registry.auth import generate_timestamp from shub.apps.api.utils import validate_request - +from sregistry.main.registry.auth import generate_timestamp def delete_container(request, container): '''delete a container only given authentication to do so''' - auth=request.META.get('HTTP_AUTHORIZATION', None) + auth = request.META.get('HTTP_AUTHORIZATION', None) if auth is None: bot.debug("authentication is invalid.") @@ -29,7 +28,7 @@ def delete_container(request, container): container.tag) bot.debug("Request payload %s" %payload) - if not validate_request(auth,payload,"delete",timestamp): + if not validate_request(auth, payload, "delete", timestamp): bot.debug("request is invalid.") return False diff --git a/shub/apps/api/actions/push.py b/shub/apps/api/actions/push.py index 36c531d4..13064d88 100644 --- a/shub/apps/api/actions/push.py +++ b/shub/apps/api/actions/push.py @@ -8,22 +8,24 @@ ''' -from shub.logger import bot + from django.http import JsonResponse +from django.views.decorators.csrf import csrf_exempt + from rest_framework.exceptions import PermissionDenied from shub.apps.main.models import Collection -from django.views.decorators.csrf import csrf_exempt from shub.apps.main.utils import format_collection_name from shub.apps.api.utils import ( validate_request, has_permission, get_request_user ) +from sregistry.main.registry.auth import generate_timestamp import json import uuid -from sregistry.main.registry.auth import generate_timestamp + @csrf_exempt def collection_auth_check(request): @@ -31,15 +33,15 @@ def 
collection_auth_check(request): return a collection id (cid) if a collection exists and the user has permission to upload. If not, a permission denied is returned. ''' - auth=request.META.get('HTTP_AUTHORIZATION', None) + auth = request.META.get('HTTP_AUTHORIZATION', None) # Load the body, which is json with variables body_unicode = request.body.decode('utf-8') body = json.loads(body_unicode) # Get variables - tag=body.get('tag','latest') - name=body.get('name') + tag = body.get('tag', 'latest') + name = body.get('name') collection_name = format_collection_name(body.get('collection')) print(tag, name, collection_name, auth, body) @@ -72,8 +74,12 @@ def collection_auth_check(request): # 2- the user is a superuser or staff # 3- the user is owner of a collection if not has_permission(auth, collection, pull_permission=False): - raise PermissionDenied(detail="Unauthorized") - + raise PermissionDenied(detail="Unauthorized") + + # If the user cannot create a new collection + if not owner.has_create_permission(): + raise PermissionDenied(detail="Unauthorized") + # If we get here user has create permission, does collection exist? 
if collection is None: collection = Collection.objects.create(name=collection_name, @@ -83,4 +89,4 @@ def collection_auth_check(request): collection.save() # Return json response with collection id - return JsonResponse({'cid': collection.id }) + return JsonResponse({'cid': collection.id}) diff --git a/shub/apps/api/actions/upload.py b/shub/apps/api/actions/upload.py index d5d5c905..746f66ac 100644 --- a/shub/apps/api/actions/upload.py +++ b/shub/apps/api/actions/upload.py @@ -8,25 +8,25 @@ ''' -from shub.logger import bot from urllib.parse import unquote from django.http import JsonResponse from django.contrib.auth.mixins import LoginRequiredMixin import json from django.shortcuts import redirect +from django.views.decorators.csrf import csrf_exempt +from django.views.generic.base import TemplateView +from django.contrib import messages from shub.apps.main.models import Collection -from sregistry.main.registry.auth import generate_timestamp from shub.apps.api.utils import ( get_request_user, has_permission, validate_request ) +from sregistry.main.registry.auth import generate_timestamp + from rest_framework.exceptions import PermissionDenied -from django.views.decorators.csrf import csrf_exempt -from django.views.generic.base import TemplateView -from django.contrib import messages import os @@ -91,12 +91,12 @@ def upload_complete(request): name = "%s:%s" %(name, tag) # Expected params are upload_id, name, md5, and cid - message = upload_container(cid = collection.id, - user = owner, - version = version, - upload_id = path, - name = name, - size = size) + message = upload_container(cid=collection.id, + user=owner, + version=version, + upload_id=path, + name=name, + size=size) # If the function doesn't return a message (None), indicates success if message is None: diff --git a/shub/apps/api/models.py b/shub/apps/api/models/__init__.py similarity index 86% rename from shub/apps/api/models.py rename to shub/apps/api/models/__init__.py index a5b016df..27d2d98c 100644 
--- a/shub/apps/api/models.py +++ b/shub/apps/api/models/__init__.py @@ -9,10 +9,9 @@ ''' from django.contrib.contenttypes.models import ContentType -from django.core.files.storage import FileSystemStorage -from django.db.models.signals import post_save from django.conf import settings from django.db import models +from .storage import OverwriteStorage import uuid import time import hashlib @@ -29,8 +28,7 @@ def get_upload_to(instance, filename): def get_upload_folder(instance, filename): '''a helper function to upload to storage ''' - from shub.apps.main.models import Container, Collection - tag = instance.tag.lower() + from shub.apps.main.models import Collection collection_name = instance.collection.lower() instance.collection = collection_name @@ -43,7 +41,7 @@ def get_upload_folder(instance, filename): collection.save() # Create collection root, if it doesn't exist - image_home = "%s/%s" %(settings.MEDIA_ROOT,collection_name) + image_home = "%s/%s" %(settings.MEDIA_ROOT, collection_name) if not os.path.exists(image_home): os.mkdir(image_home) @@ -53,20 +51,13 @@ def get_upload_folder(instance, filename): ################################################################################ -# MODELS & STORAGE +# MODELS ################################################################################ -class OverwriteStorage(FileSystemStorage): - - def get_available_name(self, name, max_length=None): - # If the filename already exists, remove it as if it was a true file system - if self.exists(name): - os.remove(os.path.join(settings.MEDIA_ROOT, name)) - return name - - class ImageFile(models.Model): + '''an ImageFile is a Singularity container pushed directly. 
+ ''' created = models.DateTimeField(auto_now_add=True) collection = models.CharField(max_length=200, null=False) tag = models.CharField(max_length=200, null=False) diff --git a/shub/apps/api/models/storage.py b/shub/apps/api/models/storage.py new file mode 100644 index 00000000..92f22084 --- /dev/null +++ b/shub/apps/api/models/storage.py @@ -0,0 +1,25 @@ +''' + +Copyright (C) 2017-2019 Vanessa Sochat. + +This Source Code Form is subject to the terms of the +Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed +with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +''' + +from django.core.files.storage import FileSystemStorage +from django.conf import settings +import os + +################################################################################ +# STORAGE +################################################################################ + +class OverwriteStorage(FileSystemStorage): + + def get_available_name(self, name, max_length=None): + # If the filename already exists, remove it as if it was a true file system + if self.exists(name): + os.remove(os.path.join(settings.MEDIA_ROOT, name)) + return name diff --git a/shub/apps/api/tasks.py b/shub/apps/api/tasks.py index 8902b3e0..867bd987 100644 --- a/shub/apps/api/tasks.py +++ b/shub/apps/api/tasks.py @@ -8,21 +8,19 @@ ''' -from celery import shared_task, Celery -from django.conf import settings -import os +from shub.logger import bot -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shub.settings') -app = Celery('shub') -app.config_from_object('django.conf:settings','shub.settings') -app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) - - -@shared_task def expire_share(sid): + '''expire a share based on its id, meaning deleting it so that + it can no longer be used. 
+ + Parameters + ========== + sid: the share id to expire + ''' from shub.apps.main.models import Share try: share = Share.objects.get(id=sid) share.delete() except Share.DoesNotExist: - bot.warning("Share %s expired." %sid) + bot.warning("Share %s expired." % sid) diff --git a/shub/apps/api/templates/rest_framework_swagger/base.html b/shub/apps/api/templates/rest_framework_swagger/base.html index e9461b76..b2de7bb4 100644 --- a/shub/apps/api/templates/rest_framework_swagger/base.html +++ b/shub/apps/api/templates/rest_framework_swagger/base.html @@ -1,88 +1,70 @@ +{% load i18n %} {% load staticfiles %} - + Swagger UI - - - - - - - + + + {% block extra_styles %} {# -- Add any additional CSS scripts here -- #} {% endblock %} - - - - - - - - - - - - - - - - - - - - - - - -{% block header %} -