diff --git a/.gitignore b/.gitignore
index 7430aae3..38a7623a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@ _site
# Other #
#########
+*.json
migrations/
make_video.sh
celerybeat-schedule.db
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d69a0407..eb38fcad 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ represented by the pull requests that fixed them. Critical items to know are:
## [master](https://github.com/singularityhub/sregistry/tree/master) (master)
+ - Addition of Google Cloud Build, versioning, tags to collections (1.1.0)
- adding BitBucket authentication backend
- updating sregistry-cli to 0.0.97, catching OSError earlier
- updating sregistry-cli to 0.0.96, and Singularity download url to use sylabs organization
diff --git a/Dockerfile b/Dockerfile
index c92ccb54..49fa0269 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -33,11 +33,6 @@ RUN apt-get update && apt-get install -y \
squashfs-tools \
build-essential
-# Install Singularity
-RUN git clone -b vault/release-2.6 https://www.github.com/sylabs/singularity.git
-WORKDIR singularity
-RUN ./autogen.sh && ./configure --prefix=/usr/local && make && make install
-
# Install Python requirements out of /tmp so not triggered if other contents of /code change
ADD requirements.txt /tmp/requirements.txt
RUN pip install --upgrade pip
@@ -47,7 +42,7 @@ ADD . /code/
################################################################################
# PLUGINS
-# You are free to comment out those plugins that you don't want to use
+# You are free to uncomment the plugins that you want to use
# Install LDAP (uncomment if wanted)
# RUN pip install python3-ldap
@@ -56,6 +51,9 @@ ADD . /code/
# Install PAM Authentication (uncomment if wanted)
# RUN pip install django-pam
+# Ensure Google Build Installed
+# RUN pip install sregistry[google-build]
+
# Install Globus (uncomment if wanted)
# RUN /bin/bash /code/scripts/globus/globus-install.sh
diff --git a/README.md b/README.md
index b60dd36d..02275471 100644
--- a/README.md
+++ b/README.md
@@ -16,9 +16,10 @@ to work together using [docker-compose.yml](docker-compose.yml).
The images are the following:
- **vanessa/sregistry**: is the main uwsgi application, which serves a Django (python-based) application.
- - **nginx**: pronounced (engine-X) is the webserver. The starter application is configured for http, however you should follow the instructions to set up https properly.
- - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [celery](http://www.celeryproject.org/), a distributed job queue that fits nicely into Django. The celery worker uses a
+ - **nginx**: pronounced (engine-X) is the webserver. The starter application is configured for http, however you should follow the instructions to set up https properly. Note that we build a custom nginx image that takes advantage of the [nginx upload module](https://www.nginx.com/resources/wiki/modules/upload/).
+ - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [django-rq](https://github.com/rq/django-rq) that uses a
- **redis**: database to organize the jobs themselves.
+ - **scheduler**: jobs can be scheduled using the scheduler.
For more information about Singularity Registry Server, please reference the
[docs](https://singularityhub.github.io/sregistry). If you have any issues,
diff --git a/VERSION b/VERSION
new file mode 100644
index 00000000..9084fa2f
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+1.1.0
diff --git a/docker-compose.yml b/docker-compose.yml
index dc7f2a55..c3365e6b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -32,13 +32,21 @@ nginx:
redis:
restart: always
image: redis:latest
- ports:
- - "6379:6379"
+scheduler:
+ image: vanessa/sregistry
+ command: python /code/manage.py rqscheduler
+ volumes:
+ - .:/code
+ volumes_from:
+ - uwsgi
+ links:
+ - redis
+ - db
worker:
image: vanessa/sregistry
- command: celery worker -A shub.celery -Q default -n default@%h -B
+ command: python /code/manage.py rqworker default
volumes:
- .:/code
volumes_from:
diff --git a/docs/assets/img/api-docs.png b/docs/assets/img/api-docs.png
new file mode 100644
index 00000000..8a302030
Binary files /dev/null and b/docs/assets/img/api-docs.png differ
diff --git a/docs/assets/img/api-swagger.png b/docs/assets/img/api-swagger.png
new file mode 100644
index 00000000..400fc174
Binary files /dev/null and b/docs/assets/img/api-swagger.png differ
diff --git a/docs/assets/img/google-build-collection.png b/docs/assets/img/google-build-collection.png
new file mode 100644
index 00000000..62669dd6
Binary files /dev/null and b/docs/assets/img/google-build-collection.png differ
diff --git a/docs/assets/img/google-build-connect-github.png b/docs/assets/img/google-build-connect-github.png
new file mode 100644
index 00000000..fb3246d8
Binary files /dev/null and b/docs/assets/img/google-build-connect-github.png differ
diff --git a/docs/assets/img/google-build-new-collection.png b/docs/assets/img/google-build-new-collection.png
new file mode 100644
index 00000000..3c5a8eea
Binary files /dev/null and b/docs/assets/img/google-build-new-collection.png differ
diff --git a/docs/assets/img/google-build-repos.png b/docs/assets/img/google-build-repos.png
new file mode 100644
index 00000000..c25b57f9
Binary files /dev/null and b/docs/assets/img/google-build-repos.png differ
diff --git a/docs/assets/img/google-cloud-build-hash.png b/docs/assets/img/google-cloud-build-hash.png
new file mode 100644
index 00000000..db16ae43
Binary files /dev/null and b/docs/assets/img/google-cloud-build-hash.png differ
diff --git a/docs/assets/img/google-cloud-build-steps.png b/docs/assets/img/google-cloud-build-steps.png
new file mode 100644
index 00000000..9002d96a
Binary files /dev/null and b/docs/assets/img/google-cloud-build-steps.png differ
diff --git a/docs/assets/img/phyllis-latour-doyle-ww2-blog-web.jpg b/docs/assets/img/phyllis-latour-doyle-ww2-blog-web.jpg
new file mode 100644
index 00000000..441e5cf8
Binary files /dev/null and b/docs/assets/img/phyllis-latour-doyle-ww2-blog-web.jpg differ
diff --git a/docs/pages/client.md b/docs/pages/client.md
index fa1cacf5..0a80aa74 100644
--- a/docs/pages/client.md
+++ b/docs/pages/client.md
@@ -6,15 +6,29 @@ permalink: /client
toc: false
---
-
-
+## Singularity Pull
+
+Singularity Registry Server implements a basic version of the Sylabs Library API,
+meaning that you can pull a container with Singularity directly. For example,
+let's say that I have a collection with a container called `collection/container:tag`,
+and my registry is served at `containers.page`. I could pull it as follows:
+
+```bash
+$ singularity pull --library https://containers.page collection/container:tag
+```
+
+# SRegistry Client
+
+Singularity Registry Global Client, or [sregistry-cli](https://github.com/singularityhub/sregistry-cli),
+is a general client to interact with Singularity images at remote endpoints, and it provides
+such an endpoint for Singularity Registry Server. We will provide
+basic instructions here, and for the full documentation, please see the [getting started guide here](https://singularityhub.github.io/sregistry-cli/client-registry). Note that you will need to [export your credentials](https://singularityhub.github.io/sregistry/credentials) in order to have authenticated interaction with sregistry.
-The original Singularity Registry Client was provided by [Singularity Python](https://github.com/singularityware/singularity-python), however we have moved the client to have its own module under [sregistry-cli](https://github.com/singularityhub/sregistry-cli). We recommend that you use the latter, and ask for features or updates when necessary. For the new version, see the [getting started guide here](https://singularityhub.github.io/sregistry-cli/client-registry). Note that you will need to [export your credentials](https://singularityhub.github.io/sregistry/credentials) in order to have authenticated
-interaction with sregistry.
## Install
### sregistry Installation
+
`sregistry` is the client for Singularity Registry server. To install, you can do the following:
```
diff --git a/docs/pages/install/builders.md b/docs/pages/install/builders.md
new file mode 100644
index 00000000..b283a172
--- /dev/null
+++ b/docs/pages/install/builders.md
@@ -0,0 +1,18 @@
+---
+layout: default
+title: Custom Builders and Storage
+pdf: true
+permalink: /install-builders
+toc: false
+---
+
+Currently, we support custom installation of the following builder and storage pairs. Each of these is provided
+as a plugin, so you can enable them in the same way. Instructions are included with the links below.
+
+ - [Google Cloud Build + Storage]({{ site.baseurl }}/plugin-google-build)
+
+Don't forget to go back to the [install docs](https://singularityhub.github.io/sregistry/install-server#storage) where you left off.
+
+
+
+
diff --git a/docs/pages/install/containers.md b/docs/pages/install/containers.md
index d4eb3df1..7a5272a1 100644
--- a/docs/pages/install/containers.md
+++ b/docs/pages/install/containers.md
@@ -8,12 +8,13 @@ toc: true
# Installation: Start Containers
-Whether you build or not, the compose command will bring up the application (and download containers provided on Docker Hub, if they aren't in your cache).
+Whether you build or not, the compose command will bring up the application
+(and download containers provided on Docker Hub, if they aren't in your cache).
## What containers are provided?
-Singularity Registy Server uses the following images, all provided on Docker Hub (or you can build the registry-specific ones
-locally):
+Singularity Registry Server uses the following images, all provided on Docker Hub
+(or you can build the registry-specific ones locally):
- [vanessa/sregistry](https://hub.docker.com/r/vanessa/sregistry): is the core application image, generated from the Dockerfile in the base of the repository.
- [vanessa/sregistry_nginx](https://hub.docker.com/r/vanessa/sregistry_nginx/): Is the nginx container installed with the nginx upload module, intended for use with speedy uploads. It is generated from the subfolder "nginx" in the repository.
@@ -23,17 +24,24 @@ To use these images provided, you can bring up the containers like so:
## Start Containers
```bash
-docker-compose up -d
+$ docker-compose up -d
```
-The `-d` means detached, and that you won't see any output (or errors) to the console. You can easily restart and stop containers, either specifying the container name(s) or leaving blank to apply to all containers. Note that these commands must be run in the folder with the `docker-compose.yml`:
+The `-d` means detached, and that you won't see any output (or errors) to the
+console. You can easily restart and stop containers, either specifying the
+container name(s) or leaving blank to apply to all containers. Note that these
+commands must be run in the folder with the `docker-compose.yml`:
```bash
-docker-compose restart uwsgi worker nginx
-docker-compose stop
+$ docker-compose restart uwsgi worker nginx
+$ docker-compose stop
```
-When you do `docker-compose up -d` the application should be available at `http://127.0.0.1/`, and if you've configured https, `https://127.0.0.1/`. If you need to shell into the application, for example to debug with `python manage.py shell` you can get the container id with `docker ps` and then do:
+When you do `docker-compose up -d` the application should be available at
+`http://127.0.0.1/`, and if you've configured https, `https://127.0.0.1/`.
+If you need to shell into the application, for example to debug with
+`python manage.py shell` you can get the container id with `docker ps`
+and then do:
```bash
NAME=$(docker ps -aqf "name=sregistry_uwsgi_1")
diff --git a/docs/pages/install/server.md b/docs/pages/install/server.md
index f0fbb62d..3645c375 100644
--- a/docs/pages/install/server.md
+++ b/docs/pages/install/server.md
@@ -7,9 +7,11 @@ toc: true
---
# Installation: Web Server and Storage
+
Before doing `docker-compose up -d` to start the containers, there are some specific things that need to be set up.
## Nginx
+
This section is mostly for your FYI. The nginx container that we use is a custom compiled
nginx that includes the [nginx uploads module](https://www.nginx.com/resources/wiki/modules/upload/).
This allows us to define a server block that will accept multipart form data directly, and
@@ -60,6 +62,7 @@ nginx:
the image will be built from the `nginx` folder provided in the repository.
## Under Maintenance Page
+
If it's ever the case that the Docker images need to be brought down for maintenance, a static fallback page should be available to notify the user. If you noticed in the [prepare_instance.sh](https://github.com/singularityhub/sregistry/blob/master/scripts/prepare_instance.sh) script, one of the things we installed is nginx (on the instance). This is because we need to use it to get proper certificates for our domain (for https). Before you do this, you might want to copy the index that we've provided to replace the default (some lame page that says welcome to Nginx!) to one that you can show when the server is undergoing maintainance.
```bash
@@ -67,13 +70,34 @@ cp $INSTALL_ROOT/sregistry/scripts/nginx-index.html /var/www/html/index.html
rm /var/www/html/index.nginx-debian.html
```
+If you want your page to use the same SSL certificates, a nginx-default.conf is also
+provided that will point to the same certificates on the server (generation discussed later):
+
+```bash
+cp $INSTALL_ROOT/sregistry/scripts/nginx-default.conf /etc/nginx/conf.d/default.conf
+```
+
If you don't care about user experience during updates and server downtime, you can just ignore this.
+## Custom Domain
+
+In the [config settings file](https://github.com/singularityhub/sregistry/blob/master/shub/settings/config.py#L30)
+you'll find a section for domain names, and other metadata about your registry. You will need to update
+this to be a custom hostname that you use, and custom names and unique resource identifiers for your
+registry. For example, if you have a Google Domain and are using Google Cloud, you should be able to set it up using [Cloud DNS](https://console.cloud.google.com/net-services/dns/api/enable?nextPath=%2Fzones&project=singularity-static-registry&authuser=1). Usually this means
+creating a zone for your instance, adding a Google Domain, and copying the DNS records for
+the domain into Google Domains. Sometimes it can take a few days for changes to propagate.
+We will discuss setting up https in a later section.
+
## Storage
-The containers that you upload to your registry will be stored "inside" the Docker container, specifically at the location `/var/www/images`. By default, we map this location to the host in the base directory of `sregistry` in a folder called `images`. Equally, we map static web files to a folder named `static`. If you look in the [docker-compose.yml](https://github.com/singularityhub/sregistry/blob/master/docker-compose.yml) that looks something like this:
+By default, the containers that you upload to your registry will be stored "inside" the Docker container, specifically at the location `/var/www/images`. While it would not be reasonable to upload to Singularity Registry and then to a custom Storage, we have recently added
+[custom builders]({{ site.url }}/install-builders) that can be used to push a recipe to Singularity Registry Server, and then trigger a cloud build that will be saved in some matching cloud storage.
-```
+If you choose the file system default storage, we map this location to the host in the base directory of `sregistry` in a folder called `images`. Equally, we map static web files to a folder named `static`. If you look in the [docker-compose.yml](https://github.com/singularityhub/sregistry/blob/master/docker-compose.yml) that looks something like this:
+
+
+```yaml
- ./static:/var/www/static
- ./images:/var/www/images
```
@@ -87,7 +111,17 @@ Thus, you are free to test different configurations of mounting this folder. If
## SSL
-Getting https certificates is really annoying, and getting `dhparams.pem` takes forever. But after the domain is obtained, it's important to do. Again, remember that we are working on the host, and we have an nginx server running. You should follow the instructions (and I do this manually) in [generate_cert.sh](../scripts/generate_cert.sh). It basically comes down to:
+
+Getting https certificates is really annoying, and getting `dhparams.pem` takes forever. But after the domain is obtained, it's important to do. Again, remember that we are working on the host, and we have an nginx server running. You should follow the instructions (and I do this manually) in [generate_cert.sh](https://github.com/singularityhub/sregistry/blob/master/scripts/generate_cert.sh).
+
+ - starting nginx
+ - installing certbot
+ - generating certificates
+ - linking them to where the docker-compose expects them
+ - add a reminder or some other method to renew within 89 days
+
+With certbot, you should be able to run `certbot renew` when the time to renew comes up. There is also an [older
+version](https://github.com/singularityhub/sregistry/blob/master/scripts/generate_cert_tiny-acme.sh) that uses tiny-acme instead of certbot. For this second option, it basically comes down to:
- starting nginx
- installing tiny acme
@@ -96,53 +130,22 @@ Getting https certificates is really annoying, and getting `dhparams.pem` takes
- moving them to where they need to be.
- add a reminder or some other method to renew within 89 days
-Once you have done this, you should use the `docker-compose.yml` and the `nginx.conf` provided in the folder [https](https). So do something like this:
+Once you have done this (and you are ready for https), you should use the `docker-compose.yml` and the `nginx.conf` provided in the folder [https](https://github.com/singularityhub/sregistry/blob/master/https/). So do something like this:
```bash
mkdir http
mv nginx.conf http
mv docker-compose.yml http
-mv https/docker-compose.yml $PWD
-mv https/nginx.conf $PWD
+cp https/docker-compose.yml .
+cp https/nginx.conf.https nginx.conf
```
-Most importantly, we use a text file to make sure that we generate a single certificate that covers both www* and without. This part of the [generate_cert.sh](https://github.com/singularityhub/sregistry/blob/master/scripts/generate_cert.sh) you will need to update the location (town, city, etc) along with your email and the domain you are using:
-
-```bash
-cat > csr_details.txt <<-EOF
-[req]
-default_bits = 2048
-prompt = no
-default_md = sha256
-req_extensions = req_ext
-distinguished_name = dn
-
-[ dn ]
-C=US
-ST=California
-L=San Mateo County
-O=End Point
-OU=SingularityRegistry
-emailAddress=youremail@university.edu
-CN = www.domain.edu
-
-[ req_ext ]
-subjectAltName = @alt_names
-
-[ alt_names ]
-DNS.1 = domain.edu
-DNS.2 = www.domain.edu
-EOF
-```
-
-Specifically, pay close attention to the fields in the last two sections that need to be customized for the domain and region.
-
If you run into strange errors regarding any kind of authentication / server / nginx when you start the images, likely it has to do with not having moved these files, or a setting about https in the [settings](https://github.com/singularityhub/sregistry/tree/master/shub/settings). If you have trouble, please post an issue on the [issues board](https://www.github.com/singularityhub/sregistry/issues) and I'd be glad to help.
## Build the Image (Optional)
-If you want to try it, you can build the image. Note that this step isn't necessary as the image is provided on [Docker Hub](https://hub.docker.com/r/vanessa/sregistry/). This step is optional - if you want to try building locally, you would do:
+If you want to try it, you can build the image. Note that this step isn't necessary as the image is provided on [Docker Hub](https://hub.docker.com/r/vanessa/sregistry/). This step is optional. However, if you are developing you likely want to build the image locally. You can do:
```bash
diff --git a/docs/pages/install/settings.md b/docs/pages/install/settings.md
index 1153fecc..34402521 100644
--- a/docs/pages/install/settings.md
+++ b/docs/pages/install/settings.md
@@ -33,6 +33,7 @@ SECRET_KEY = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
```
### Authentication Secrets
+
One thing I (@vsoch) can't do for you in advance is produce application keys and secrets to give your Registry for each social provider that you want to allow users (and yourself) to login with. We are going to use a framework called [python social auth](https://python-social-auth-docs.readthedocs.io/en/latest/configuration/django.html) to achieve this, and in fact you can add a [number of providers](http://python-social-auth-docs.readthedocs.io/en/latest/backends/index.html) (I have set up a lot of them, including SAML, so please submit an issue if you want one added to the base proper.). Singularity Registry uses OAuth2 with a token--> refresh flow because it gives the user power to revoke permission at any point, and is a more modern strategy than storing a database of usernames and passwords. You can enable or disable as many of these that you want, and this is done in the [settings/config.py](https://github.com/singularityhub/sregistry/blob/master/shub/settings/config.py):
```python
@@ -44,14 +45,19 @@ ENABLE_GITLAB_AUTH=False
ENABLE_BITBUCKET_AUTH=False
```
-and you will need at least one to log in. I've found that Github works the fastest and easiest, and then Google. Twitter now requires an actual server name and won't work with localost, but if you are deploying on a server with a proper domain go ahead and use it. All avenues are extremely specific with regard to callback urls, so you should be very careful in setting them up.
+and you will need at least one to log in. I've found that Github works the fastest and easiest, and then Google. Twitter now requires an actual server name and won't work with localhost, but if you are deploying on a server with a proper domain go ahead and use it. All avenues are extremely specific with regard to callback urls, so you should be very careful in setting them up. If you want automated builds from a repository
+integration with Google Cloud Build, then you must use GitHub.
-Other authentication methods, such as LDAP, are implemented as [plugins](https://singularityhub.github.io/sregistry/plugins/) to sregistry. See the [plugins documentation](https://singularityhub.github.io/sregistry/plugins/) for details on how to configure these.
+#### Plugins
+Other authentication methods, such as LDAP, are implemented as [plugins](https://singularityhub.github.io/sregistry/plugins/) to sregistry. See the [plugins documentation](https://singularityhub.github.io/sregistry/plugins/) for details on how to configure these. You should also now look here to see which plugins you will
+want to set up (and then build into your container).
-We will walk through the setup of each in detail. For all of the below, you should put the content in your `secrets.py` under settings. Note that if you are deploying locally, you will need to put localhost (127.0.0.1) as your domain, and Github is now the only one that worked reliably without an actual domain for me.
+For authentication plugins, we will walk through the setup of each in detail here.
+For other plugins, you should look at the [plugins](https://singularityhub.github.io/sregistry/plugins/) documentation now before proceeding. For all of the below, you should put the content in your `secrets.py` under settings. Note that if you are deploying locally, you will need to put localhost (127.0.0.1) as your domain, and Github is now the only one that worked reliably without an actual domain for me.
#### Google OAuth2
+
You first need to [follow the instructions](https://developers.google.com/identity/protocols/OpenIDConnect) and setup an OAuth2 API credential. The redirect URL should be every variation of having http/https, and www. and not. (Eg, change around http-->https and with and without www.) of `https://www.sregistry.org/complete/google-oauth2/`. Google has good enough debugging that if you get this wrong, it will give you an error message with what is going wrong. You should store the credential in `secrets.py`, along with the complete path to the file for your application:
@@ -153,7 +159,7 @@ the callback url here should be `http://[your-domain]/complete/twitter`.
### Config
-In the [config.py](../shub/settings/config.py) you need to define the following:
+In the [config.py](https://github.com/singularityhub/sregistry/blob/master/shub/settings/config.py) you need to define the following:
#### Domain Name
@@ -176,7 +182,7 @@ You need to define a registry uri, and different contact information:
```
HELP_CONTACT_EMAIL = 'vsochat@stanford.edu'
-HELP_INSTITUTION_SITE = 'srcc.stanford.edu'
+HELP_INSTITUTION_SITE = 'https://srcc.stanford.edu'
REGISTRY_NAME = "Tacosaurus Computing Center"
REGISTRY_URI = "taco"
```
@@ -194,6 +200,17 @@ USER_COLLECTIONS=True
Setting `USER_COLLECTIONS` to False also means that users cannot create [Teams](/sregistry/setup#teams), which are organized groups of users that then can be added as contributors to individual collections. With this setting as True, any authenticated user, staff, or administrator can create and manage new collections and teams, and this is done by issuing a token.
+Finally, you can also allow users to create collections, but limit the number created.
+
+```
+# Limit users to N collections (None is unlimited)
+USER_COLLECTION_LIMIT = None
+```
+
+The default is None, meaning that users can create unlimited collections, given that `USER_COLLECTIONS`
+is True. If you set this to a non-zero positive integer, user collections will be limited to
+this number. If a user is staff or an administrator, they are not subject to this limit.
+
#### Registry Private
By default Singularity Registry will provide public images, with an option to set them to private. If you are working with sensitive data and/or images, you might want all images to be private, with no option to make public. You can control that with the variable `PRIVATE_ONLY`.
diff --git a/docs/pages/introduction.md b/docs/pages/introduction.md
index 1254813c..04aeff72 100644
--- a/docs/pages/introduction.md
+++ b/docs/pages/introduction.md
@@ -177,8 +177,9 @@ As was stated in the base [README.md](/sregistry/) The components of the applica
- **vanessa/sregistry**: is the main uwsgi application, which serves a Django (python-based) application.
- **nginx**: pronounced (engine-X) is the webserver. The starter application is configured for http, however you should follow the instructions to set up https properly. Note that we build a custom nginx image that takes advantage of the [nginx upload module](https://www.nginx.com/resources/wiki/modules/upload/).
- - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [celery](http://www.celeryproject.org/), a distributed job queue that fits nicely into Django. The celery worker uses a
+ - **worker**: is the same uwsgi image, but with a running command that is specialized to perform tasks. The tasks are run via [django-rq](https://github.com/rq/django-rq) that uses a
- **redis**: database to organize the jobs themselves.
+ - **scheduler**: jobs can be scheduled using the scheduler.
This means that, given a pretty basic server to run the application, and enough space connected to it to store the images, you can bring the entire thing up relatively quickly. Awesome! Let's get started and talk about first steps of [install](/sregistry/install). Or read about [use cases first](/sregistry/use-cases)
diff --git a/docs/pages/plugins/README.md b/docs/pages/plugins/README.md
index b902e11f..b4dc434e 100644
--- a/docs/pages/plugins/README.md
+++ b/docs/pages/plugins/README.md
@@ -2,7 +2,7 @@
layout: default
title: Plugins
pdf: true
-permalink: /plugins
+permalink: /plugins/
toc: false
---
@@ -24,12 +24,12 @@ your registries' local `shub/settings/secrets.py` file.
- [PAM-Auth](/sregistry/plugin-pam): authentication using PAM (unix host users)
- [Globus](/sregistry/plugin-globus): connect and transfer using Globus
- [SAML](/sregistry/plugin-saml): Authentication with SAML
+ - [Google Build](/sregistry/plugin-google-build) provides build and storage on Google Cloud.
## Writing a Plugin
An sregistry plugin is a Django App, that lives inside `shub/plugins/`.
-
-The plugin interface is currently under development. At present, each plugin:
+Each plugin:
- Must provide a `urls.py` listing any URLs that will be exposed under `/plugin-name`
- Can provide additional, models, views, templates, static files.
diff --git a/docs/pages/plugins/google_build/README.md b/docs/pages/plugins/google_build/README.md
new file mode 100644
index 00000000..571f8c7d
--- /dev/null
+++ b/docs/pages/plugins/google_build/README.md
@@ -0,0 +1,319 @@
+---
+layout: default
+title: "Plugin: Custom Builder and Storage"
+pdf: true
+permalink: /plugin-google-build
+toc: true
+---
+
+# Plugin: Google Cloud Build and Storage
+
+The Singularity Registry client allows for [a large set](https://singularityhub.github.io/sregistry-cli/clients) of options for external storage endpoints. Specifically, this plugin uses storage and build provided by Google, meaning:
+
+ - [Google Build](https://singularityhub.github.io/sregistry-cli/client-google-build)
+ - [Google Storage](https://singularityhub.github.io/sregistry-cli/client-google-storage)
+
+Other cloud vendors have been included with sregistry client (AWS, S3, Minio) and equivalent
+build and storage pairs can be added here. If you would like to discuss adding a builder
+and storage pair, please [open an issue](https://www.github.com/singularityhub/sregistry).
+
+Don't forget to go back to the [install docs](https://singularityhub.github.io/sregistry/install-settings) where you left off. This quick start will walk through setting up custom storage using
+[Google Cloud Build](https://singularityhub.github.io/sregistry-cli/client-google-build)
+and [Google Storage](https://singularityhub.github.io/sregistry-cli/client-google-storage) as
+an endpoint.
+
+## Configure sregistry
+
+By default, google build is disabled. To configure sregistry to
+use Google Cloud build and Storage, in settings/config.py you can enable the plugin by
+uncommenting it from the list here:
+
+```bash
+PLUGINS_ENABLED = [
+# 'ldap_auth',
+# 'saml_auth',
+# 'globus',
+ 'google_build'
+]
+```
+
+And uncomment installing the google build client in the Dockerfile:
+
+```bash
+# Ensure Google Build Installed
+# RUN pip install sregistry[google-build]
+```
+
+You will need to build the image locally, with other additional
+changes (usually plugins) you want enabled:
+
+```bash
+$ docker build -t vanessa/sregistry .
+```
+
+### Secrets
+
+Next, set the following variables in `shub/settings/secrets.py`,
+that you can create from `dummy_secrets.py` in the shub/settings folder.
+The first two speak for themselves, your project name and path to your
+Google Application Credentials.
+
+#### Project Identifiers
+
+```python
+# =============================================================================
+# Google Cloud Build + Storage
+# Configure a custom builder and storage endpoint
+# =============================================================================
+
+# google-storage, s3, google-drive, dropbox
+GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
+SREGISTRY_GOOGLE_PROJECT=myproject-ftw
+
+```
+
+You can create custom [Google Application Credentials](https://cloud.google.com/docs/authentication/getting-started) for your server in the browser, and it will be enough to make the service account
+a project owner. If you are on a Google Cloud instance you can scp (with gcloud) using the command line as follows:
+
+```bash
+$ gcloud compute scp [credentials].json $USER@[INSTANCE]:/tmp --project [PROJECT]
+```
+
+Keep in mind that the path to the Google credentials file must be
+within the container (/code is the root folder that is bound to the filesystem).
+
+#### Build Caching
+
+```python
+SREGISTRY_GOOGLE_BUILD_CACHE="true"
+```
+
+If you set this variable (to anything), it means that after build, you will not
+delete intermediate dependencies in the cloudbuild bucket (keep them as a cache for rebuilds if needed).
+This defaults to being unset, meaning that files are cleaned up. If you define this as anything,
+the build files will be cached.
+
+#### Build Limit
+
+```python
+SREGISTRY_GOOGLE_BUILD_LIMIT=100
+```
+
+To prevent denial of service attacks on Google Cloud Storage, you should
+set a reasonable limit for the number of active, concurrent builds. This
+number should be based on your expected number of users, repositories, and
+recipes per repository.
+
+
+#### Singularity Version
+
+By default, we use the default version that is set by the [Google Build](https://singularityhub.github.io/sregistry-cli/client-google-build#environment) client that belongs to Singularity Registry Client.
+However, as this value is subject to change, we recommend that you set it in your
+secrets so that you can decide when you want to update.
+
+```python
+SREGISTRY_GOOGLE_BUILD_SINGULARITY_VERSION="v3.2.1-slim"
+```
+
+The version must coincide with a container tag hosted under [singularityware/singularity](https://hub.docker.com/r/singularityware/singularity/).
+
+#### Storage Bucket Name
+
+By default, the bucket name will be called `sregistry-gcloud-build-[hostname]`, and since
+your host is a docker container, that will resolve to a random set of numbers and
+letters. For this reason, we *strongly recommend you set a consistent hostname*.
+If you do not and need to remove and bring up the containers again, the bucket
+metadata will not match the new bucket name. Here is an example of how to set a custom name:
+
+```python
+SREGISTRY_GOOGLE_STORAGE_BUCKET="taco-singularity-registry"
+```
+
+Additionally, a temporary bucket is created with the same name ending in `_cloudbuild`. This bucket is for build time dependencies, and is cleaned up after the fact. If you are having trouble getting a bucket it is likely because the name is taken,
+and we recommend creating both `[name]` and `[name]_cloudbuild` in the console and then setting the name here.
+
+#### Build Timeout
+
+The number of seconds for the build to timeout. If set to None, will be 10 minutes. If
+unset, it will default to 3 hours. This time should be less than the `SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS`. If
+you want to use the default, don't define this variable in your secrets.
+
+```python
+# SREGISTRY_GOOGLE_BUILD_TIMEOUT_SECONDS=None
+```
+
+
+#### Build Expiration
+
+You must define the number of seconds that your build expires in, meaning that it would no
+longer be accepted by the server.
+
+```python
+SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS=28800
+```
+
+The default provided in the dummy secrets, shown above, would indicate 8 hours.
+
+#### Private Containers
+
+By default, images that you upload will be made public, meaning that a user that stumbles on the URL (or has permission to read your bucket otherwise) will be able to see and download them. If you want to make images globally private you should export this variable as some derivative of yes/true. If no variable is found, images are made public by default.
+
+```python
+SREGISTRY_GOOGLE_STORAGE_PRIVATE=True
+```
+
+These variables are written in detail in the dummy_secrets.py file.
+If you need more information, you can read [the Google Cloud Build page](https://singularityhub.github.io/sregistry-cli/client-google-build).
+
+If you are missing some variable, there will be an error message
+on interaction with the Google Cloud Build API since you won't be able to
+authenticate. Once your settings are ready to go, you will want to continue
+with the [install docs](https://singularityhub.github.io/sregistry/install-server#storage) where you left off,
+and you can continue here after you've done:
+
+```
+$ docker-compose up -d
+```
+
+and confirmed the registry running at localhost, and also have logged in
+(so you have an account with permission to push containers and recipes.)
+
+## Singularity Registry Client
+
+If you haven't yet, you will need the [sregistry client](https://singularityhub.github.io/sregistry-cli/) in order to push recipes to build with Google Cloud Build. The minimum version that supports this
+is `0.2.19`. An easy way to install is any of the following:
+
+```bash
+$ pip install sregistry[google-build]
+$ pip install sregistry[google-build-basic] # without local sqlite database
+```
+
+Next, export the client to be your registry.
+
+```
+$ export SREGISTRY_CLIENT=registry
+```
+
+If you are reading here from the installation docs, you likely haven't
+brought up your registry and should [return there](https://singularityhub.github.io/sregistry/install-settings) where you left off.
+
+## Building Containers
+
+There are two ways to trigger builds:
+
+ 1. Automated trigger from GitHub webhooks
+ 2. Manual push of a recipe
+
+The recommended approach is to enable GitHub authentication and then
+have pushes to your repository trigger builds. For the second approach,
+while you can upload a recipe directly, it is not recommended
+as it doesn't have the recipe kept under any version control.
+
+### Trigger from Github
+
+You will first need to log in with GitHub, and then navigate to the
+container collections page (the "Containers" link in the navigation):
+
+
+
+If the Google Build plugin is correctly enabled, you'll see a second option on the
+right:
+
+
+
+Select this, and your repositories (and organizations) that you granted
+permission to connect to will show up. You can select one:
+
+
+
+Once you've connected the repository, an initial build will build
+the latest version of the recipes that are discovered. Any recipe that
+is in the format `Singularity.<tag>` or just `Singularity` (tag defaults
+to latest) will be built.
+
+
+
+If you have two recipes named equivalently in different folders, the
+recipe that was more recently updated will be used.
+
+### Push a Recipe
+
+When the server is started and the client is ready, it's time to push a recipe
+to build! By default, you will need to specify the name of the collection and
+container, and to include the fact that you want to use Google Cloud Build.
+You'll need to install Singularity Registry Client version 0.2.21 or later:
+
+```bash
+$ pip install sregistry[google-build]>=0.2.21
+$ pip install sregistry[google-build-basic]>=0.2.21 # without local database
+```
+
+Then to submit a build, you'll need to grab your credentials from `https://<your-registry>/token`.
+You can write them to your Singularity Registry secrets at `$HOME/.sregistry`. Once your
+token and registry base are defined, you will need to create the collection
+in the web interface first to establish yourself as an owner. **You cannot
+push to a collection that does not exist**. Once the collection is
+created (for example, below I created the collection "collection"), you can push like this:
+
+```bash
+$ sregistry build --name registry://collection/container:tag Singularity --builder google_build
+```
+
+Notice that we specify the builder to be "google_build." Also notice
+that the command simply requires a name for your collection (which must
+already exist, as noted above), push access, and having [exported your token](https://singularityhub.github.io/sregistry/credentials) to your local machine.
+
+If you get this error:
+
+```bash
+[================================] 0/0 MB - 00:00:00
+Recipe upload failed: 403 Client Error: Forbidden for url: https://containers.page/google_build/build/.
+```
+
+you forgot to create a collection called "collection" and need to make it in the interface before
+proceeding.
+
+## Pull Containers
+
+Once you have a container, you of course want to pull it! You can use
+the Singularity Client to do this. Let's say that our server is at `https://www.containers.page`:
+
+```bash
+$ singularity pull shub://containers.page/singularityhub/hello-registry:latest
+ 760.00 KiB / 760.00 KiB [=========================================================================================] 100.00% 5.22 MiB/s 0s
+```
+
+And there you have it!
+
+```bash
+$ ls
+hello-registry_latest.sif
+
+$ singularity run hello-registry_latest.sif
+Tacotacotaco!
+```
+
+Note that having a custom registry name (containers.page, in the above example)
+was a bug in early versions of Singularity 3.x. If you have trouble with
+this command, you will need to upgrade Singularity.
+
+You can technically also just pull it with simple bash commands, if you
+don't want to rely on Singularity.
+
+```bash
+$ wget $(curl https://containers.page/api/container/singularityhub/hello-registry:latest | jq --raw-output .image)
+```
+
+If you want to pull with Singularity (but get the error) you can also do this:
+
+```bash
+$ singularity pull $(curl https://containers.page/api/container/singularityhub/hello-registry:latest | jq --raw-output .image)
+```
+
+Finally, it should be pointed out that you can use the Google Builder integration
+from your command line without having a registry at all. [Singularity Registry Client](https://singularityhub.github.io/sregistry-cli/client-google-build) can serve to build and then pull the image on its own.
+
+
+
+
+
diff --git a/docs/pages/plugins/ldap/README.md b/docs/pages/plugins/ldap/README.md
index 098829e0..329e6c24 100644
--- a/docs/pages/plugins/ldap/README.md
+++ b/docs/pages/plugins/ldap/README.md
@@ -34,7 +34,7 @@ with unencrypted, StartTLS, and SSL access to an OpenLDAP directory.
## Quick Start
This quick start is intended to demonstrate basic functionality of the LDAP server, and you should
-review the links referenced above for more detail. After you've completed basic setup in
+review the links referenced above for more detail.
### What is LDAP?
diff --git a/docs/pages/setup/register.md b/docs/pages/setup/register.md
index bcf08212..70f33485 100644
--- a/docs/pages/setup/register.md
+++ b/docs/pages/setup/register.md
@@ -90,7 +90,11 @@ thumb: custom/taco-logo.png
You can then add your files, and submit a PR to the main repo. We will have tests that ping your registry to ensure correct naming of files and registry address, along with a preview of the content that is added. If you want to prevew locally, you can run `jekyll serve`.
-Great! Now that you have your accounts, you probably want to learn about how to build and push images! First you need to generate a [credential](/sregistry/credentials), and then you will can read about the [client](/sregistry/client).
+Great! Now that you have your accounts, you probably want to learn about how to build and push images!
+To push directly, you will first need to generate a [credential](/sregistry/credentials). If you
+have enabled the [Google Build+Github]({{ site.baseurl }}/plugin-google-build) plugin,
+then you will be able to log in with GitHub, and connect GitHub repositories to build
+on commit. Either way, you should next read about the [client](/sregistry/client).
diff --git a/docs/pages/setup/roles.md b/docs/pages/setup/roles.md
index 4ff6b8a0..2a0dd1dd 100644
--- a/docs/pages/setup/roles.md
+++ b/docs/pages/setup/roles.md
@@ -30,7 +30,15 @@ is an anonymous user of the registry. In the case of a private registry, this in
Based on the above and granted that you are setting up the server and reading this, you will be a **superuser** because you have permissions to control the Docker images and grant other users (and yourself) the ability to push with the role **admin**.
-Next, learn how users can be a part of [teams](/sregistry/setup-teams)
+# Google Build + GitHub
+
+If you have enabled the [Google Build+Github]({{ site.baseurl }}/plugin-google-build) plugin,
+then your users will be able to log in with GitHub, and build collections that are
+linked to GitHub repositories. In this case, permissions for the registry interaction
+do not extend to GitHub. For example, if you build from a repository that you own,
+adding a collaborator or another owner will not change anything on GitHub.
+
+Speaking of collaborators, next, learn how users can be a part of [teams](/sregistry/setup-teams)
diff --git a/docs/pages/setup/teams.md b/docs/pages/setup/teams.md
index 20cf8845..4757fd02 100644
--- a/docs/pages/setup/teams.md
+++ b/docs/pages/setup/teams.md
@@ -7,6 +7,7 @@ toc: false
---
# Teams
+
+To add a level of organization of users, sregistry has created loose groups of users called Teams. A registry admin can create a team, or if `USER_COLLECTIONS` is True, an authenticated user can also create them. Creating a team means that the creator (admin or authenticated user) becomes the Owner of the team that can add and remove users. If an admin creates a team for a group of users, he or she must manage it or add a user to the list of owners to do the same. To create a team:
1. Click on the "teams" tab in the navigation bar
diff --git a/docs/pages/use-cases.md b/docs/pages/use-cases.md
index 45ad51f1..006df77d 100644
--- a/docs/pages/use-cases.md
+++ b/docs/pages/use-cases.md
@@ -17,7 +17,8 @@ In this use case, I am an individual user, or share a computer resource with a s
My university runs a shared computational resource manages a registry on a server next to it. Akin to supplying software modules, the administrators keep a version controlled repo of build recipes, and when software needs to be updated, create a new image with a tag for the version. The users can then use the images by way of specifying the unique resource identifier.
## Collaborative Cluster Registry
-It's often the case that pipelines are maintained internally within labs, or eventually discarded after papers are published and graduate students finish. In this use case, a large cluster wants to provide a central, organized resource for the scientific containers generated by its researchers. Perhaps alongside or instead of the core software and tools, this cluster decides to build final or published containers for its users. Building might lead to a private image for use at the institution, or a public image that can be referenced in a publication and easily disseminated. To build, the researcher simply might submit a pull request to a Github repo associated with the registry, it can be built and tested and discussed, and when ready, pushed to the resource from the continuous integration, or by the cluster's particular build server. Either way, the final upload is an authenticated, single line call to push the image with an appropriate name and tag.
+It's often the case that pipelines are maintained internally within labs, or eventually discarded after papers are published and graduate students finish. In this use case, a large cluster wants to provide a central, organized resource for the scientific containers generated by its researchers. Perhaps alongside or instead of the core software and tools, this cluster decides to build final or published containers for its users. Building might lead to a private image for use at the institution, or a public image that can be referenced in a publication and easily disseminated. To build, the researcher simply might submit a pull request to a Github repo associated with the registry, it can be built and tested and discussed, and when ready, pushed to the resource from the continuous integration, or by the cluster's particular build server. Either way, the final upload is an authenticated, single line call to push the image with an appropriate name and tag. If you
+add [plugins](/sregistry-cli/plugins) you can also have custom authentication and builds (e.g., GitHub webhooks + Google Cloud Build).
If you are a single user and looking for an image management tool, perhaps to work with images in multiple locations beyond a Singularity Registry, Server then you will be interested in the [Singularity Global Client](https://singularityhub.github.io/sregistry-cli).
diff --git a/https/docker-compose.yml b/https/docker-compose.yml
index d268dbaa..b9f1c26c 100644
--- a/https/docker-compose.yml
+++ b/https/docker-compose.yml
@@ -8,6 +8,9 @@ uwsgi:
- .:/code
- ./static:/var/www/static
- ./images:/var/www/images
+ # uncomment for PAM auth
+ #- /etc/passwd:/etc/passwd
+ #- /etc/shadow:/etc/shadow
links:
- redis
- db
@@ -32,13 +35,21 @@ nginx:
redis:
restart: always
image: redis:latest
- ports:
- - "6379:6379"
+scheduler:
+ image: vanessa/sregistry
+ command: python /code/manage.py rqscheduler
+ volumes:
+ - .:/code
+ volumes_from:
+ - uwsgi
+ links:
+ - redis
+ - db
worker:
image: vanessa/sregistry
- command: celery worker -A shub.celery -Q default -n default@%h -B
+ command: python /code/manage.py rqworker default
volumes:
- .:/code
volumes_from:
diff --git a/requirements.txt b/requirements.txt
index f6ddf91b..82734c54 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,41 +1,44 @@
+anyjson
+coreapi==2.3.3
cython
-h5py
-uwsgi
-django>=1.11.18
-social-auth-app-django
-social-auth-core[saml]
-python-social-auth
-djangorestframework
-markdown
-django-filter
+django-chosen
+django-crispy-forms
+django-datatables-view
+django-dirtyfields
django-extensions
-django-taggit
+django-filter
django-form-utils
-django-crispy-forms
+django-gravatar2
+django-guardian
+django-hstore==1.3.5
django-notifications-hq
+django-rest-swagger
+django-rq
+django-taggit
django-taggit-templatetags
-django-dirtyfields
-psycopg2-binary
+django-user-agents
+django>=1.11.18
+djangorestframework
+google
+google-api-python-client
+h5py
+ipython
+markdown
numexpr
-shapely
+oauth2client==3.0
Pillow
+PyYAML==5.1
+psycopg2-binary
+pygments
+python3-saml
+python-social-auth
requests
requests-oauthlib
requests-toolbelt
-celery[redis]<4
-django-celery
-django-chosen
-opbeat
-django-hstore==1.3.5
-django-datatables-view
-sregistry==0.0.97
-django-gravatar2
-pygments
-google-api-python-client
-google
-oauth2client==3.0
retrying
-django-rest-swagger
-django-user-agents
-django-guardian
-python3-saml
+rq-scheduler
+shapely
+social-auth-app-django
+social-auth-core[saml]
+sregistry[all-basic]>=0.2.19
+uwsgi
diff --git a/run_uwsgi.sh b/run_uwsgi.sh
index e7b2baa3..dd48ca6c 100755
--- a/run_uwsgi.sh
+++ b/run_uwsgi.sh
@@ -2,14 +2,11 @@
python manage.py makemigrations
python manage.py migrate auth
-python manage.py makemigrations users
-python manage.py makemigrations main
-python manage.py makemigrations api
-python manage.py makemigrations logs
python manage.py migrate
python manage.py collectstatic --noinput
service cron start
+
if grep -Fxq "PLUGINS_ENABLED+=[\"globus\"]" /code/shub/settings/config.py
then
# When configured, we can start the endpoint
diff --git a/scripts/generate_cert.sh b/scripts/generate_cert.sh
old mode 100755
new mode 100644
index f4b6e721..ffce55b8
--- a/scripts/generate_cert.sh
+++ b/scripts/generate_cert.sh
@@ -4,45 +4,54 @@
#
#
-INSTALL_ROOT=${1}
-EMAIL=${2}
+EMAIL=${1}
DOMAIN=${2}
-STATE=${3:-California}
-COUNTY=${4:-San Mateo County}
-
-sudo mkdir /opt/acme_tiny
-cd /tmp && git clone https://github.com/diafygi/acme-tiny
-sudo mv acme-tiny /opt/acme-tiny/
-sudo chown $USER -R /opt/acme-tiny
-
-# Create a directory for the keys and cert
-cd $INSTALL_ROOT/sregistry
-
-# If you started the images, stop nginx
-docker-compose stop nginx
-sudo service nginx start
-
-# backup old key and cert
-if [ -f "/etc/ssl/private/domain.key" ]
- then
- sudo cp /etc/ssl/private/domain.key{,.bak.$(date +%s)}
-fi
-
-if [ -f "/etc/ssl/certs/chained.pem" ]
- then
- sudo cp /etc/ssl/certs/chained.pem{,.bak.$(date +%s)}
-fi
-
-if [ -f "/etc/ssl/certs/domain.csr" ]
- then
- sudo cp /etc/ssl/certs/domain.csr{,.bak.$(date +%s)}
-fi
-
-# Generate a private account key, if doesn't exist
-if [ ! -f "/etc/ssl/certs/account.key" ]
- then
- openssl genrsa 4096 > account.key && sudo mv account.key /etc/ssl/certs
-fi
+INSTALL_ROOT=$HOME
+
+# Install certbot (if not already done)
+sudo add-apt-repository ppa:certbot/certbot
+sudo apt-get update
+sudo apt-get install python-certbot-nginx
+
+# Get certificates (might need sudo)
+certbot certonly --nginx -d "${DOMAIN}" -d "www.${DOMAIN}" --email "${EMAIL}" --agree-tos --redirect
+
+# The prompt is interactive, and will show the locations of certificates
+
+# Obtaining a new certificate
+# Performing the following challenges:
+# http-01 challenge for containers.page
+# http-01 challenge for www.containers.page
+# Waiting for verification...
+# Cleaning up challenges
+
+# IMPORTANT NOTES:
+# - Congratulations! Your certificate and chain have been saved at:
+# /etc/letsencrypt/live/containers.page/fullchain.pem
+# Your key file has been saved at:
+# /etc/letsencrypt/live/containers.page/privkey.pem
+# Your cert will expire on 2019-09-04. To obtain a new or tweaked
+# version of this certificate in the future, simply run certbot
+# again. To non-interactively renew *all* of your certificates, run
+# "certbot renew"
+# - Your account credentials have been saved in your Certbot
+# configuration directory at /etc/letsencrypt. You should make a
+# secure backup of this folder now. This configuration directory will
+# also contain certificates and private keys obtained by Certbot so
+# making regular backups of this folder is ideal.
+# - If you like Certbot, please consider supporting our work by:
+
+# Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate
+# Donating to EFF: https://eff.org/donate-le
+
+# Since the containers expect these files to be in /etc/ssl, copy there
+# This CANNOT be a link.
+sudo cp /etc/letsencrypt/live/containers.page/fullchain.pem /etc/ssl/certs/chained.pem
+sudo cp /etc/letsencrypt/live/containers.page/privkey.pem /etc/ssl/private/domain.key
+
+# Create recursive backup
+backup=$(echo /etc/letsencrypt{,.bak.$(date +%s)} | cut -d ' ' -f 2)
+sudo cp -R /etc/letsencrypt $backup
# Add extra security
if [ ! -f "/etc/ssl/certs/dhparam.pem" ]
@@ -50,57 +59,6 @@ if [ ! -f "/etc/ssl/certs/dhparam.pem" ]
openssl dhparam -out dhparam.pem 4096 && sudo mv dhparam.pem /etc/ssl/certs
fi
-if [ ! -f "csr_details.txt" ]
-then
-
-cat > csr_details.txt <<-EOF
-[req]
-default_bits = 2048
-prompt = no
-default_md = sha256
-req_extensions = req_ext
-distinguished_name = dn
-
-[ dn ]
-C=US
-ST=$STATE
-L=$COUNTY
-O=End Point
-OU=$DOMAIN
-emailAddress=$EMAIL
-CN = www.$DOMAIN
-
-[ req_ext ]
-subjectAltName = @alt_names
-
-[ alt_names ]
-DNS.1 = $DOMAIN
-DNS.2 = www.$DOMAIN
-EOF
-
-fi
-
-# Call openssl
-openssl req -new -sha256 -nodes -out domain.csr -newkey rsa:2048 -keyout domain.key -config <( cat csr_details.txt )
-
-# Create a CSR for $DOMAIN
-#sudo openssl req -new -sha256 -key /etc/ssl/private/domain.key -subj "/CN=$DOMAIN" > domain.csr
-sudo mv domain.csr /etc/ssl/certs/domain.csr
-sudo mv domain.key /etc/ssl/private/domain.key
-
-# Create the challenge folder in the webroot
-sudo mkdir -p /var/www/html/.well-known/acme-challenge/
-sudo chown $USER -R /var/www/html/
-
-# Get a signed certificate with acme-tiny
-#docker-compose stop nginx
-python /opt/acme-tiny/acme_tiny.py --account-key /etc/ssl/certs/account.key --csr /etc/ssl/certs/domain.csr --acme-dir /var/www/html/.well-known/acme-challenge/ > ./signed.crt
-
-wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > intermediate.pem
-cat signed.crt intermediate.pem > chained.pem
-sudo mv chained.pem /etc/ssl/certs/
-rm signed.crt intermediate.pem
-
# Stop nginx
sudo service nginx stop
diff --git a/scripts/generate_certs.sh b/scripts/generate_certs.sh
new file mode 100755
index 00000000..f4b6e721
--- /dev/null
+++ b/scripts/generate_certs.sh
@@ -0,0 +1,108 @@
+#! /bin/bash
+#
+# nginx should be installed on the host machine
+#
+#
+
+INSTALL_ROOT=${1}
+EMAIL=${2}
+DOMAIN=${3}
+STATE=${4:-California}
+COUNTY=${5:-San Mateo County}
+
+sudo mkdir /opt/acme_tiny
+cd /tmp && git clone https://github.com/diafygi/acme-tiny
+sudo mv acme-tiny /opt/acme-tiny/
+sudo chown $USER -R /opt/acme-tiny
+
+# Create a directory for the keys and cert
+cd $INSTALL_ROOT/sregistry
+
+# If you started the images, stop nginx
+docker-compose stop nginx
+sudo service nginx start
+
+# backup old key and cert
+if [ -f "/etc/ssl/private/domain.key" ]
+ then
+ sudo cp /etc/ssl/private/domain.key{,.bak.$(date +%s)}
+fi
+
+if [ -f "/etc/ssl/certs/chained.pem" ]
+ then
+ sudo cp /etc/ssl/certs/chained.pem{,.bak.$(date +%s)}
+fi
+
+if [ -f "/etc/ssl/certs/domain.csr" ]
+ then
+ sudo cp /etc/ssl/certs/domain.csr{,.bak.$(date +%s)}
+fi
+
+# Generate a private account key, if doesn't exist
+if [ ! -f "/etc/ssl/certs/account.key" ]
+ then
+ openssl genrsa 4096 > account.key && sudo mv account.key /etc/ssl/certs
+fi
+
+# Add extra security
+if [ ! -f "/etc/ssl/certs/dhparam.pem" ]
+ then
+ openssl dhparam -out dhparam.pem 4096 && sudo mv dhparam.pem /etc/ssl/certs
+fi
+
+if [ ! -f "csr_details.txt" ]
+then
+
+cat > csr_details.txt <<-EOF
+[req]
+default_bits = 2048
+prompt = no
+default_md = sha256
+req_extensions = req_ext
+distinguished_name = dn
+
+[ dn ]
+C=US
+ST=$STATE
+L=$COUNTY
+O=End Point
+OU=$DOMAIN
+emailAddress=$EMAIL
+CN = www.$DOMAIN
+
+[ req_ext ]
+subjectAltName = @alt_names
+
+[ alt_names ]
+DNS.1 = $DOMAIN
+DNS.2 = www.$DOMAIN
+EOF
+
+fi
+
+# Call openssl
+openssl req -new -sha256 -nodes -out domain.csr -newkey rsa:2048 -keyout domain.key -config <( cat csr_details.txt )
+
+# Create a CSR for $DOMAIN
+#sudo openssl req -new -sha256 -key /etc/ssl/private/domain.key -subj "/CN=$DOMAIN" > domain.csr
+sudo mv domain.csr /etc/ssl/certs/domain.csr
+sudo mv domain.key /etc/ssl/private/domain.key
+
+# Create the challenge folder in the webroot
+sudo mkdir -p /var/www/html/.well-known/acme-challenge/
+sudo chown $USER -R /var/www/html/
+
+# Get a signed certificate with acme-tiny
+#docker-compose stop nginx
+python /opt/acme-tiny/acme_tiny.py --account-key /etc/ssl/certs/account.key --csr /etc/ssl/certs/domain.csr --acme-dir /var/www/html/.well-known/acme-challenge/ > ./signed.crt
+
+wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > intermediate.pem
+cat signed.crt intermediate.pem > chained.pem
+sudo mv chained.pem /etc/ssl/certs/
+rm signed.crt intermediate.pem
+
+# Stop nginx
+sudo service nginx stop
+
+cd $INSTALL_ROOT/sregistry
+docker-compose up -d
diff --git a/scripts/generate_certs_tiny-acme.sh b/scripts/generate_certs_tiny-acme.sh
new file mode 100755
index 00000000..f4b6e721
--- /dev/null
+++ b/scripts/generate_certs_tiny-acme.sh
@@ -0,0 +1,108 @@
+#! /bin/bash
+#
+# nginx should be installed on the host machine
+#
+#
+
+INSTALL_ROOT=${1}
+EMAIL=${2}
+DOMAIN=${3}
+STATE=${4:-California}
+COUNTY=${5:-San Mateo County}
+
+sudo mkdir /opt/acme_tiny
+cd /tmp && git clone https://github.com/diafygi/acme-tiny
+sudo mv acme-tiny /opt/acme-tiny/
+sudo chown $USER -R /opt/acme-tiny
+
+# Create a directory for the keys and cert
+cd $INSTALL_ROOT/sregistry
+
+# If you started the images, stop nginx
+docker-compose stop nginx
+sudo service nginx start
+
+# backup old key and cert
+if [ -f "/etc/ssl/private/domain.key" ]
+ then
+ sudo cp /etc/ssl/private/domain.key{,.bak.$(date +%s)}
+fi
+
+if [ -f "/etc/ssl/certs/chained.pem" ]
+ then
+ sudo cp /etc/ssl/certs/chained.pem{,.bak.$(date +%s)}
+fi
+
+if [ -f "/etc/ssl/certs/domain.csr" ]
+ then
+ sudo cp /etc/ssl/certs/domain.csr{,.bak.$(date +%s)}
+fi
+
+# Generate a private account key, if doesn't exist
+if [ ! -f "/etc/ssl/certs/account.key" ]
+ then
+ openssl genrsa 4096 > account.key && sudo mv account.key /etc/ssl/certs
+fi
+
+# Add extra security
+if [ ! -f "/etc/ssl/certs/dhparam.pem" ]
+ then
+ openssl dhparam -out dhparam.pem 4096 && sudo mv dhparam.pem /etc/ssl/certs
+fi
+
+if [ ! -f "csr_details.txt" ]
+then
+
+cat > csr_details.txt <<-EOF
+[req]
+default_bits = 2048
+prompt = no
+default_md = sha256
+req_extensions = req_ext
+distinguished_name = dn
+
+[ dn ]
+C=US
+ST=$STATE
+L=$COUNTY
+O=End Point
+OU=$DOMAIN
+emailAddress=$EMAIL
+CN = www.$DOMAIN
+
+[ req_ext ]
+subjectAltName = @alt_names
+
+[ alt_names ]
+DNS.1 = $DOMAIN
+DNS.2 = www.$DOMAIN
+EOF
+
+fi
+
+# Call openssl
+openssl req -new -sha256 -nodes -out domain.csr -newkey rsa:2048 -keyout domain.key -config <( cat csr_details.txt )
+
+# Create a CSR for $DOMAIN
+#sudo openssl req -new -sha256 -key /etc/ssl/private/domain.key -subj "/CN=$DOMAIN" > domain.csr
+sudo mv domain.csr /etc/ssl/certs/domain.csr
+sudo mv domain.key /etc/ssl/private/domain.key
+
+# Create the challenge folder in the webroot
+sudo mkdir -p /var/www/html/.well-known/acme-challenge/
+sudo chown $USER -R /var/www/html/
+
+# Get a signed certificate with acme-tiny
+#docker-compose stop nginx
+python /opt/acme-tiny/acme_tiny.py --account-key /etc/ssl/certs/account.key --csr /etc/ssl/certs/domain.csr --acme-dir /var/www/html/.well-known/acme-challenge/ > ./signed.crt
+
+wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > intermediate.pem
+cat signed.crt intermediate.pem > chained.pem
+sudo mv chained.pem /etc/ssl/certs/
+rm signed.crt intermediate.pem
+
+# Stop nginx
+sudo service nginx stop
+
+cd $INSTALL_ROOT/sregistry
+docker-compose up -d
diff --git a/scripts/globus/robotnamer.py b/scripts/globus/robotnamer.py
index 21523cc9..375cd4ef 100755
--- a/scripts/globus/robotnamer.py
+++ b/scripts/globus/robotnamer.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python
'''
-
Copyright (C) 2019 Vanessa Sochat.
This Source Code Form is subject to the terms of the
@@ -48,15 +47,14 @@ class RobotNamer:
def generate(self, delim='-', length=4, chars='0123456789'):
- '''
- Generate a robot name. Inspiration from Haikunator, but much more
+ '''Generate a robot name. Inspiration from Haikunator, but much more
poorly implemented ;)
- Parameters
- ==========
- delim: Delimiter
- length: TokenLength
- chars: TokenChars
+ Parameters
+ ==========
+ delim: Delimiter
+ length: TokenLength
+ chars: TokenChars
'''
descriptor = self._select(self._descriptors)
diff --git a/scripts/nginx-default.conf b/scripts/nginx-default.conf
new file mode 100644
index 00000000..b030bff0
--- /dev/null
+++ b/scripts/nginx-default.conf
@@ -0,0 +1,49 @@
+# This is provided for the instance in case https is needed outside of the
+# docker-compose. You can move this file to /etc/nginx/conf.d/default.conf
+# and then bring up nginx (sudo service nginx start)
+
+server {
+ listen *:80;
+ server_name localhost;
+
+ client_max_body_size 8000M;
+ client_body_buffer_size 8000M;
+ client_body_timeout 120;
+
+ add_header X-Clacks-Overhead "GNU Terry Pratchett";
+ add_header X-Clacks-Overhead "GNU Terry Pratchett";
+ add_header Access-Control-Allow-Origin *;
+ add_header 'Access-Control-Allow-Credentials' 'true';
+ add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
+ add_header 'Access-Control-Allow-Headers' 'Authorization,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
+
+ root /var/www/html;
+ index index.html;
+ try_files $uri /index.html;
+}
+
+server {
+
+ listen 443;
+ server_name localhost;
+
+ client_max_body_size 2000M;
+ client_body_buffer_size 2000M;
+
+ ssl on;
+ #ssl_certificate /etc/letsencrypt/live/singularity-hub.org/fullchain.pem;
+ ssl_certificate /etc/ssl/certs/chained.pem;
+ ssl_certificate_key /etc/ssl/private/domain.key;
+ #ssl_certificate_key /etc/letsencrypt/live/singularity-hub.org/privkey.pem;
+ ssl_session_timeout 5m;
+ ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+ ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA;
+ ssl_session_cache shared:SSL:50m;
+ ssl_dhparam /etc/ssl/certs/dhparam.pem;
+ ssl_prefer_server_ciphers on;
+
+ root /var/www/html;
+ index index.html;
+ try_files $uri /index.html;
+}
+
diff --git a/scripts/prepare_instance.sh b/scripts/prepare_instance.sh
index 7f71b84f..6e53294d 100644
--- a/scripts/prepare_instance.sh
+++ b/scripts/prepare_instance.sh
@@ -3,6 +3,9 @@
# Change this to where you want to install. $HOME
# is probably a bad choice if it needs to be maintained
# by a group of people
+
+# This was developed on Ubuntu 18.04 LTS on Google Cloud
+
INSTALL_ROOT=$HOME
# Prepare instance (or machine) with Docker, docker-compose, python
@@ -20,31 +23,40 @@ sudo pip install ipaddress
sudo pip install oauth2client
-# Python 3
-wget https://repo.continuum.io/archive/Anaconda3-4.2.0-Linux-x86_64.sh
-bash Anaconda3-4.2.0-Linux-x86_64.sh -b
-
-# You might already have anaconda installed somewhere
-PATH=$HOME/anaconda3/bin:$PATH
-rm Anaconda3-4.2.0-Linux-x86_64.sh
-export PATH
+# Install Docker dependencies
+sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ gnupg-agent \
+ software-properties-common
# Add docker key server
-sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+# OK
+
+# $ sudo apt-key fingerprint 0EBFCD88
+# pub rsa4096 2017-02-22 [SCEA]
+# 9DC8 5822 9FC7 DD38 854A E2D8 8D81 803C 0EBF CD88
+# uid [ unknown] Docker Release (CE deb)
+# sub rsa4096 2017-02-22 [S]
+
+# Add stable repository
+ sudo add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
# Install Docker!
sudo apt-get update &&
-sudo apt-get install apt-transport-https ca-certificates &&
-sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" | sudo tee --append /etc/apt/sources.list.d/docker.list
-sudo apt-get update &&
-apt-cache policy docker-engine
-sudo apt-get update &&
-sudo apt-get -y install linux-image-extra-$(uname -r) linux-image-extra-virtual &&
-sudo apt-get -y install docker-engine &&
-sudo service docker start
+sudo apt-get install -y docker-ce docker-ce-cli containerd.io
+
+# test, you will still need sudo
+sudo docker run hello-world
+
+# Docker group should already exist
+# sudo groupadd docker
-#sudo docker run hello-world
#make sure to add all users that will maintain / use the registry
sudo usermod -aG docker $USER
@@ -53,11 +65,16 @@ sudo apt -y install docker-compose
# Note that you will need to log in and out for changes to take effect
-if [ ! -d $INSTALL_ROOT/singularity-registry ]
-then
- cd $INSTALL_ROOT
- git clone https://www.github.com/singularityhub/sregistry.git
- cd sregistry
- docker build -t vanessa/sregistry .
- docker-compose up -d
+if [ ! -d $INSTALL_ROOT/sregistry ]; then
+ cd $INSTALL_ROOT
+
+ # production
+ # git clone https://www.github.com/singularityhub/sregistry.git
+
+ # development
+ git clone -b add/builders https://www.github.com/singularityhub/sregistry.git
+
+ cd sregistry
+ docker build -t vanessa/sregistry .
+ docker-compose up -d
fi
diff --git a/shub/__init__.py b/shub/__init__.py
index e09a930d..e69de29b 100644
--- a/shub/__init__.py
+++ b/shub/__init__.py
@@ -1,4 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-from .celery import shubcelery as celery_app
-
-__all__ = ['celery_app']
diff --git a/shub/apps/api/actions/create.py b/shub/apps/api/actions/create.py
index b950935a..82a15d76 100644
--- a/shub/apps/api/actions/create.py
+++ b/shub/apps/api/actions/create.py
@@ -9,13 +9,11 @@
'''
from shub.settings import MEDIA_ROOT
-from sregistry.utils import parse_image_name
from shub.logger import bot
+from sregistry.utils import parse_image_name
from django.db import IntegrityError
-from django.db.utils import DataError
+import django_rq
import shutil
-import uuid
-import json
import os
def move_upload_to_storage(collection, upload_id):
@@ -36,7 +34,7 @@ def move_upload_to_storage(collection, upload_id):
# Rename the file, moving from ImageUpload to Storage
filename = os.path.basename(instance.file.path)
- new_path = os.path.join(image_home, filename.replace('.part', '.simg'))
+ new_path = os.path.join(image_home, filename.replace('.part', '.sif'))
shutil.move(instance.file.path, new_path)
print('%s --> %s' %(instance.file.path, new_path))
instance.file.name = new_path
@@ -52,7 +50,7 @@ def generate_nginx_storage_path(collection, source, dest):
source: the source file (under /var/www/images/_upload/{0-9}
dest: the destination filename
'''
- image_home = "%s/%s" %(MEDIA_ROOT, collection.name)
+ image_home = os.path.join(MEDIA_ROOT, collection.name)
return os.path.join(image_home, os.path.basename(dest))
@@ -68,7 +66,7 @@ def move_nginx_upload_to_storage(collection, source, dest):
dest: the destination filename
'''
# Create collection root, if it doesn't exist
- image_home = "%s/%s" %(MEDIA_ROOT, collection.name)
+ image_home = os.path.join(MEDIA_ROOT, collection.name)
if not os.path.exists(image_home):
os.mkdir(image_home)
@@ -76,6 +74,19 @@ def move_nginx_upload_to_storage(collection, source, dest):
shutil.move(source, new_path)
return new_path
+def calculate_version(cid):
+ '''calculate version is run as a separate task after a container upload.
+ Instead of using md5 provided by nginx we calculate sha256 sum and
+ then include as the version variable.
+ '''
+ from shub.apps.main.views import get_container
+ from sregistry.utils import get_file_hash
+ print("Calculating version for upload.")
+ container = get_container(cid)
+ version = "sha256.%s" % get_file_hash(container.image.datafile.path, "sha256")
+ container.version = version
+ container.save()
+
def upload_container(cid, user, name, version, upload_id, size=None):
'''save an uploaded container, usually coming from an ImageUpload
@@ -97,9 +108,8 @@ def upload_container(cid, user, name, version, upload_id, size=None):
error / success codes.
'''
- from shub.apps.main.models import ( Container, Collection )
- from shub.apps.api.models import ( ImageUpload, ImageFile )
- from shub.apps.main.views import update_container_labels
+ from shub.apps.main.models import (Container, Collection)
+ from shub.apps.api.models import (ImageUpload, ImageFile)
collection = Collection.objects.get(id=cid)
# Only continue if user is an owner
@@ -142,7 +152,7 @@ def upload_container(cid, user, name, version, upload_id, size=None):
# If one exists, we check if it's frozen
create_new = True
- if len(containers) > 0:
+ if containers:
# If we already have a container, it might be frozen
container = containers[0]
@@ -186,6 +196,9 @@ def upload_container(cid, user, name, version, upload_id, size=None):
# Once the container is saved, delete the intermediate file object
delete_file_instance(instance)
+ # Run a task to calculate the sha256 sum
+ django_rq.enqueue(calculate_version, cid=container.id)
+
def delete_file_instance(instance):
'''a helper function to remove the file assocation, and delete the instance
diff --git a/shub/apps/api/actions/delete.py b/shub/apps/api/actions/delete.py
index d9135b30..87ad7a67 100644
--- a/shub/apps/api/actions/delete.py
+++ b/shub/apps/api/actions/delete.py
@@ -1,6 +1,6 @@
'''
-Copyright (C) 2017-2018 Vanessa Sochat.
+Copyright (C) 2017-2019 Vanessa Sochat.
This Source Code Form is subject to the terms of the
Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
@@ -9,14 +9,13 @@
'''
from shub.logger import bot
-from sregistry.main.registry.auth import generate_timestamp
from shub.apps.api.utils import validate_request
-
+from sregistry.main.registry.auth import generate_timestamp
def delete_container(request, container):
'''delete a container only given authentication to do so'''
- auth=request.META.get('HTTP_AUTHORIZATION', None)
+ auth = request.META.get('HTTP_AUTHORIZATION', None)
if auth is None:
bot.debug("authentication is invalid.")
@@ -29,7 +28,7 @@ def delete_container(request, container):
container.tag)
bot.debug("Request payload %s" %payload)
- if not validate_request(auth,payload,"delete",timestamp):
+ if not validate_request(auth, payload, "delete", timestamp):
bot.debug("request is invalid.")
return False
diff --git a/shub/apps/api/actions/push.py b/shub/apps/api/actions/push.py
index 36c531d4..13064d88 100644
--- a/shub/apps/api/actions/push.py
+++ b/shub/apps/api/actions/push.py
@@ -8,22 +8,24 @@
'''
-from shub.logger import bot
+
from django.http import JsonResponse
+from django.views.decorators.csrf import csrf_exempt
+
from rest_framework.exceptions import PermissionDenied
from shub.apps.main.models import Collection
-from django.views.decorators.csrf import csrf_exempt
from shub.apps.main.utils import format_collection_name
from shub.apps.api.utils import (
validate_request,
has_permission,
get_request_user
)
+from sregistry.main.registry.auth import generate_timestamp
import json
import uuid
-from sregistry.main.registry.auth import generate_timestamp
+
@csrf_exempt
def collection_auth_check(request):
@@ -31,15 +33,15 @@ def collection_auth_check(request):
return a collection id (cid) if a collection exists and the user
has permission to upload. If not, a permission denied is returned.
'''
- auth=request.META.get('HTTP_AUTHORIZATION', None)
+ auth = request.META.get('HTTP_AUTHORIZATION', None)
# Load the body, which is json with variables
body_unicode = request.body.decode('utf-8')
body = json.loads(body_unicode)
# Get variables
- tag=body.get('tag','latest')
- name=body.get('name')
+ tag = body.get('tag', 'latest')
+ name = body.get('name')
collection_name = format_collection_name(body.get('collection'))
print(tag, name, collection_name, auth, body)
@@ -72,8 +74,12 @@ def collection_auth_check(request):
# 2- the user is a superuser or staff
# 3- the user is owner of a collection
if not has_permission(auth, collection, pull_permission=False):
- raise PermissionDenied(detail="Unauthorized")
-
+ raise PermissionDenied(detail="Unauthorized")
+
+ # If the user cannot create a new collection
+ if not owner.has_create_permission():
+ raise PermissionDenied(detail="Unauthorized")
+
# If we get here user has create permission, does collection exist?
if collection is None:
collection = Collection.objects.create(name=collection_name,
@@ -83,4 +89,4 @@ def collection_auth_check(request):
collection.save()
# Return json response with collection id
- return JsonResponse({'cid': collection.id })
+ return JsonResponse({'cid': collection.id})
diff --git a/shub/apps/api/actions/upload.py b/shub/apps/api/actions/upload.py
index d5d5c905..746f66ac 100644
--- a/shub/apps/api/actions/upload.py
+++ b/shub/apps/api/actions/upload.py
@@ -8,25 +8,25 @@
'''
-from shub.logger import bot
from urllib.parse import unquote
from django.http import JsonResponse
from django.contrib.auth.mixins import LoginRequiredMixin
import json
from django.shortcuts import redirect
+from django.views.decorators.csrf import csrf_exempt
+from django.views.generic.base import TemplateView
+from django.contrib import messages
from shub.apps.main.models import Collection
-from sregistry.main.registry.auth import generate_timestamp
from shub.apps.api.utils import (
get_request_user,
has_permission,
validate_request
)
+from sregistry.main.registry.auth import generate_timestamp
+
from rest_framework.exceptions import PermissionDenied
-from django.views.decorators.csrf import csrf_exempt
-from django.views.generic.base import TemplateView
-from django.contrib import messages
import os
@@ -91,12 +91,12 @@ def upload_complete(request):
name = "%s:%s" %(name, tag)
# Expected params are upload_id, name, md5, and cid
- message = upload_container(cid = collection.id,
- user = owner,
- version = version,
- upload_id = path,
- name = name,
- size = size)
+ message = upload_container(cid=collection.id,
+ user=owner,
+ version=version,
+ upload_id=path,
+ name=name,
+ size=size)
# If the function doesn't return a message (None), indicates success
if message is None:
diff --git a/shub/apps/api/models.py b/shub/apps/api/models/__init__.py
similarity index 86%
rename from shub/apps/api/models.py
rename to shub/apps/api/models/__init__.py
index a5b016df..27d2d98c 100644
--- a/shub/apps/api/models.py
+++ b/shub/apps/api/models/__init__.py
@@ -9,10 +9,9 @@
'''
from django.contrib.contenttypes.models import ContentType
-from django.core.files.storage import FileSystemStorage
-from django.db.models.signals import post_save
from django.conf import settings
from django.db import models
+from .storage import OverwriteStorage
import uuid
import time
import hashlib
@@ -29,8 +28,7 @@ def get_upload_to(instance, filename):
def get_upload_folder(instance, filename):
'''a helper function to upload to storage
'''
- from shub.apps.main.models import Container, Collection
- tag = instance.tag.lower()
+ from shub.apps.main.models import Collection
collection_name = instance.collection.lower()
instance.collection = collection_name
@@ -43,7 +41,7 @@ def get_upload_folder(instance, filename):
collection.save()
# Create collection root, if it doesn't exist
- image_home = "%s/%s" %(settings.MEDIA_ROOT,collection_name)
+ image_home = "%s/%s" %(settings.MEDIA_ROOT, collection_name)
if not os.path.exists(image_home):
os.mkdir(image_home)
@@ -53,20 +51,13 @@ def get_upload_folder(instance, filename):
################################################################################
-# MODELS & STORAGE
+# MODELS
################################################################################
-class OverwriteStorage(FileSystemStorage):
-
- def get_available_name(self, name, max_length=None):
- # If the filename already exists, remove it as if it was a true file system
- if self.exists(name):
- os.remove(os.path.join(settings.MEDIA_ROOT, name))
- return name
-
-
class ImageFile(models.Model):
+ '''an ImageFile is a Singularity container pushed directly.
+ '''
created = models.DateTimeField(auto_now_add=True)
collection = models.CharField(max_length=200, null=False)
tag = models.CharField(max_length=200, null=False)
diff --git a/shub/apps/api/models/storage.py b/shub/apps/api/models/storage.py
new file mode 100644
index 00000000..92f22084
--- /dev/null
+++ b/shub/apps/api/models/storage.py
@@ -0,0 +1,25 @@
+'''
+
+Copyright (C) 2017-2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django.core.files.storage import FileSystemStorage
+from django.conf import settings
+import os
+
+################################################################################
+# STORAGE
+################################################################################
+
+class OverwriteStorage(FileSystemStorage):
+
+ def get_available_name(self, name, max_length=None):
+ # If the filename already exists, remove it as if it was a true file system
+ if self.exists(name):
+ os.remove(os.path.join(settings.MEDIA_ROOT, name))
+ return name
diff --git a/shub/apps/api/tasks.py b/shub/apps/api/tasks.py
index 8902b3e0..867bd987 100644
--- a/shub/apps/api/tasks.py
+++ b/shub/apps/api/tasks.py
@@ -8,21 +8,19 @@
'''
-from celery import shared_task, Celery
-from django.conf import settings
-import os
+from shub.logger import bot
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shub.settings')
-app = Celery('shub')
-app.config_from_object('django.conf:settings','shub.settings')
-app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
-
-
-@shared_task
def expire_share(sid):
+ '''expire a share based on its id, meaning deleting it so that
+ it can no longer be used.
+
+ Parameters
+ ==========
+ sid: the share id to expire
+ '''
from shub.apps.main.models import Share
try:
share = Share.objects.get(id=sid)
share.delete()
except Share.DoesNotExist:
- bot.warning("Share %s expired." %sid)
+ bot.warning("Share %s expired." % sid)
diff --git a/shub/apps/api/templates/rest_framework_swagger/base.html b/shub/apps/api/templates/rest_framework_swagger/base.html
index e9461b76..b2de7bb4 100644
--- a/shub/apps/api/templates/rest_framework_swagger/base.html
+++ b/shub/apps/api/templates/rest_framework_swagger/base.html
@@ -1,88 +1,70 @@
+{% load i18n %}
{% load staticfiles %}
-
+
Swagger UI
-
-
-
-
-
-
-
+
+
+
{% block extra_styles %}
{# -- Add any additional CSS scripts here -- #}
{% endblock %}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-{% block header %}
-
- {% if edit_permission %}{% endif %}
+ {% if container.image %}
+ {% if edit_permission %}{% endif %}
+ {% if container.frozen %}
+
+ {% else %}
+
+ {% endif %}
+ {% if edit_permission %}{% endif %}
+ {% else %}
+ {% if container.metadata.builder.name == "google_build" %}
+ {% include "google_build/status.html" %}
+ {% endif %}{% endif %}
+
{{ container.tag }}
{{ container.add_date }}
- {% if container.image %}
+ {% if container.image %}
{% if 'globus' in PLUGINS_ENABLED %}{% include "globus/_container_details.html" %}{% endif %}
{% endif %}
+ {% for key, value in container.metrics.items %}
+
{{ key|to_space }}: {{ value }}
+ {% endfor %}
+
+
+ {% endif %}
+
+
+ {% endif %}
{% if container.metadata.runscript %}
diff --git a/shub/apps/main/urls/collections.py b/shub/apps/main/urls/collections.py
index 1254ecd7..62a63d83 100644
--- a/shub/apps/main/urls/collections.py
+++ b/shub/apps/main/urls/collections.py
@@ -15,16 +15,17 @@
urlpatterns = [
url(r'^collections/?$', views.all_collections, name="collections"),
- url(r'^collections/(?P\d+)/edit/?$',views.edit_collection,name='edit_collection'),
- url(r'^collections/new/?$',views.new_collection,name='new_collection'),
- url(r'^collections/(?P\d+)/settings/?$',views.collection_settings,name='collection_settings'),
- url(r'^collections/(?P\d+)/contributors/?$',views.edit_contributors,name='edit_contributors'),
- url(r'^collections/(?P\d+)/?$',views.view_collection,name='collection_details'),
- url(r'^collections/my/?$',views.my_collections,name='my_collections'),
+ url(r'^collections/(?P\d+)/edit/?$', views.edit_collection,name='edit_collection'),
+ url(r'^collections/new/?$', views.new_collection,name='new_collection'),
+ url(r'^collections/(?P\d+)/settings/?$', views.collection_settings,name='collection_settings'),
+ url(r'^collections/(?P\d+)/contributors/?$', views.edit_contributors,name='edit_contributors'),
+ url(r'^collections/(?P\d+)/?$', views.view_collection, name='collection_details'),
+ url(r'^collections/my/?$', views.my_collections,name='my_collections'),
url(r'^collections/(?P\d+)/usage/?$', views.collection_commands,name='collection_commands'),
url(r'^collections/(?P\d+)/delete/?$', views.delete_collection,name='delete_collection'),
url(r'^collections/(?P\d+)/private/?$',views.make_collection_private,name='make_collection_private'),
- url(r'^collections/(?P\d+)/public/?$',views.make_collection_public,name='make_collection_public')
+ url(r'^collections/(?P\d+)/public/?$', views.make_collection_public,name='make_collection_public'),
+ url(r'^collections/(?P.+?)/(?P.+?)/?$', views.view_named_collection, name='collection_byname')
]
diff --git a/shub/apps/main/utils.py b/shub/apps/main/utils.py
index d43c08d6..79e418b0 100644
--- a/shub/apps/main/utils.py
+++ b/shub/apps/main/utils.py
@@ -9,28 +9,28 @@
'''
from datetime import timedelta
+from django.conf import settings
from django.utils import timezone
-from shub.apps.main.models import Container
from sregistry.utils import read_json
-from django.conf import settings
-from itertools import chain
import os
import re
-import requests
import tempfile
def get_nightly_comparisons(date=None):
'''load the latest nightly comparisons.
- :param date: if provided, will load specified date instead of latest.
+
+ Parameters
+ ==========
+ date: if provided, will load specified date instead of latest.
'''
- root = os.path.abspath(os.path.join(settings.MEDIA_ROOT,'trees','nightly'))
- base_name = "%s/container-tree" %(root)
- if date == None:
+ root = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'trees', 'nightly'))
+ base_name = "%s/container-tree" % root
+ if date is None:
date = "latest"
- base_name = "%s-%s.json" %(base_name,date)
+ base_name = "%s-%s.json" %(base_name, date)
if os.path.exists(base_name):
return read_json(base_name)
return None
@@ -40,7 +40,7 @@ def write_tmpfile(memory_file):
'''save tmp will extract a file to a temporary location
'''
tmpdir = tempfile.mkdtemp()
- file_name = '%s/%s' %(tmpdir,memory_file.name)
+ file_name = '%s/%s' %(tmpdir, memory_file.name)
with open(file_name, 'wb+') as dest:
for chunk in memory_file.chunks():
dest.write(chunk)
@@ -53,12 +53,12 @@ def format_collection_name(collection_name):
return collection_name.strip('-').lower()
-def format_container_name(name,special_characters=None):
+def format_container_name(name, special_characters=None):
'''format_container_name will take a name supplied by the user,
- remove all special characters (except for those defined by "special-characters"
- and return the new image name.
+ remove all special characters (except for those defined by "special-characters"
+ and return the new image name.
'''
- if special_characters == None:
+ if special_characters is None:
special_characters = []
return ''.join(e.lower() for e in name if e.isalnum() or e in special_characters)
@@ -66,14 +66,14 @@ def format_container_name(name,special_characters=None):
def validate_share(share):
'''compare the share expiration date with the current date
- Parameters
- ==========
- share: a shub.apps.main.models.Share object, holding a container,
- and an expiration date.
+ Parameters
+ ==========
+ share: a shub.apps.main.models.Share object, holding a container,
+ and an expiration date.
- Returns
- =======
- True if valid, False if not. If False, will delete share.
+ Returns
+ =======
+ True if valid, False if not. If False, will delete share.
'''
today = timezone.now()
if today <= share.expire_date:
diff --git a/shub/apps/main/views/__init__.py b/shub/apps/main/views/__init__.py
index 6e97956f..ee129643 100644
--- a/shub/apps/main/views/__init__.py
+++ b/shub/apps/main/views/__init__.py
@@ -22,11 +22,13 @@
edit_collection,
edit_contributors,
get_collection,
+ get_collection_named,
make_collection_private,
make_collection_public,
new_collection,
my_collections,
- view_collection
+ view_collection,
+ view_named_collection
)
diff --git a/shub/apps/main/views/collections.py b/shub/apps/main/views/collections.py
index b604e4ca..2ed8e298 100644
--- a/shub/apps/main/views/collections.py
+++ b/shub/apps/main/views/collections.py
@@ -8,15 +8,14 @@
'''
+from shub.settings import PRIVATE_ONLY
+from shub.apps.users.views import validate_credentials
+from shub.apps.main.utils import format_collection_name
from shub.apps.main.models import (
Container,
- Collection,
- Star
+ Collection
)
-from sregistry.utils import read_file
-from shub.apps.users.views import validate_credentials
-from shub.apps.main.utils import format_collection_name
from django.shortcuts import (
render,
redirect
@@ -25,13 +24,9 @@
from django.http.response import Http404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
-from shub.settings import PRIVATE_ONLY
from itertools import chain
-import os
-import re
import uuid
-import pickle
@@ -42,7 +37,6 @@ def get_collection(cid):
Parameters
==========
cid: the id of the collection to look up
-
'''
keyargs = {'id':cid}
try:
@@ -52,6 +46,24 @@ def get_collection(cid):
else:
return collection
+def get_collection_named(name, retry=True):
+ '''get a collection by name. First we try the collection name,
+ then we try splitting if the name includes /
+
+ Parameters
+ ==========
+ name: the name of the collection to look up
+ '''
+ try:
+ collection = Collection.objects.get(name=name)
+ except Collection.DoesNotExist:
+ if retry is True and "/" in name:
+ name = name.split('/')[0]
+ return get_collection_named(name, retry=False)
+ raise Http404
+ else:
+ return collection
+
################################################################################
# COLLECTIONS ##################################################################
@@ -114,7 +126,7 @@ def new_collection(request):
collection.owners.add(request.user)
collection.save()
- messages.info(request, 'Collection %s created.' %name)
+ messages.info(request, 'Collection %s created.' % name)
return redirect('collection_details', cid=collection.id)
# Just new collection form, not a post
@@ -126,24 +138,40 @@ def new_collection(request):
return redirect("collections")
-
-
-def view_collection(request, cid):
+def view_named_collection(request, username, reponame):
'''View container build details (all container builds for a repo)
+ This is the same as view_collection but we look up the collection
+ by its full name.
Parameters
==========
- cid: the collection id
-
+ username: the owner of the collection
+ reponame: the collection name
'''
+ # First attempt, try looking up collection by username/reponame
+ try:
+ collection = Collection.objects.get(name="%s/%s" %(username, reponame))
+ except Collection.DoesNotExist:
- collection = get_collection(cid)
+ # Then look for username only
+ try:
+ collection = Collection.objects.get(name=username)
+ except:
+ raise Http404
+
+ return _view_collection(request, collection)
+
+def _view_collection(request, collection):
+ '''a shared function to finish up checking permissions for viewing
+ a collection, and returning to the user. Called by view_named_collection
+ and view_collection
+ '''
edit_permission = collection.has_edit_permission(request)
view_permission = collection.has_view_permission(request)
# If private, and not the owner, no go.
if collection.private and not view_permission:
- messages.info(request,"This collection is private.")
+ messages.info(request, "This collection is private.")
return redirect('collections')
# If the user is logged in, see if there is a star
@@ -157,6 +185,18 @@ def view_collection(request, cid):
return render(request, 'collections/view_collection.html', context)
+def view_collection(request, cid):
+ '''View container build details (all container builds for a repo)
+
+ Parameters
+ ==========
+ cid: the collection id
+
+ '''
+
+ collection = get_collection(cid)
+ return _view_collection(request, collection)
+
def collection_settings(request, cid):
'''Collection settings is the entrypoint for editing the builder, branches,
@@ -168,7 +208,6 @@ def collection_settings(request, cid):
Parameters
==========
cid: the id of the collection
-
'''
from shub.apps.users.permissions import has_create_permission
from shub.apps.users.models import Team
@@ -182,14 +221,14 @@ def collection_settings(request, cid):
contrib_ids = [x.id for x in collection.contributors.all()]
if request.user not in collection.owners.all():
- messages.info(request,"Only owners can change collection settings")
+ messages.info(request, "Only owners can change collection settings")
return redirect('collection_details', cid=collection.id)
if not edit_permission:
- messages.info(request,"You are not permitted to perform this action.")
+ messages.info(request, "You are not permitted to perform this action.")
return redirect('collections')
- context = {'collection':collection,
+ context = {'collection': collection,
'teams': Team.objects.all(),
'owners_ids': owners_ids,
'contrib_ids': contrib_ids,
@@ -199,7 +238,6 @@ def collection_settings(request, cid):
return render(request, 'collections/collection_settings.html', context)
-
def edit_collection(request, cid):
'''edit collection will let the user specify a different image for
their builds, in the case that the provided isn't large enough, etc.
@@ -209,12 +247,11 @@ def edit_collection(request, cid):
cid: the id of the collection
'''
-
collection = get_collection(cid)
edit_permission = collection.has_edit_permission(request)
if not edit_permission:
- messages.info(request,"You are not permitted to perform this action.")
+ messages.info(request, "You are not permitted to perform this action.")
return redirect('collections')
if request.method == "POST":
@@ -233,7 +270,7 @@ def edit_collection(request, cid):
context = {'collection':collection,
- 'edit_permission':edit_permission }
+ 'edit_permission':edit_permission}
return render(request, 'collections/edit_collection.html', context)
@@ -252,7 +289,7 @@ def collection_commands(request, cid):
# If private, and not the owner, no go.
if not collection.has_view_permission(request):
- messages.info(request,"This collection is private.")
+ messages.info(request, "This collection is private.")
return redirect('collections')
context = {"collection":collection}
@@ -260,7 +297,7 @@ def collection_commands(request, cid):
-def delete_collection(request,cid):
+def delete_collection(request, cid):
'''delete a container collection
Parameters
@@ -272,7 +309,7 @@ def delete_collection(request,cid):
# Only an owner can delete
if not collection.has_edit_permission(request):
- messages.info(request,"This action is not permitted.")
+ messages.info(request, "This action is not permitted.")
return redirect('collections')
# Delete files before containers
@@ -282,13 +319,11 @@ def delete_collection(request,cid):
container.delete()
collection.delete()
- messages.info(request,'Collection successfully deleted.')
+ messages.info(request, 'Collection successfully deleted.')
return redirect('collections')
-
-
################################################################################
# COLLECTION PRIVACY / ACTIVE
################################################################################
@@ -303,29 +338,27 @@ def _change_collection_privacy(request, collection, make_private=True):
request: the request object with user permissions, etc.
collection: the collection to make private
make_private: boolean, True indicates asking for private
-
'''
edit_permission = collection.has_edit_permission(request)
# Customize message based on making public or private
status = "private"
- if make_private == False:
+ if not make_private:
status = "public"
# If the user has edit permission, make the repo private
- if edit_permission is True:
-
+ if edit_permission:
collection.private = make_private
- messages.info(request,"Collection set to %s." %(status))
+ messages.info(request, "Collection set to %s." % status)
collection.save()
else:
- messages.info(request,"You need permissions to perform this operation.")
+ messages.info(request, "You need permissions to perform this operation.")
return collection
@login_required
-def change_collection_privacy(request,cid,make_private=True):
+def change_collection_privacy(request, cid, make_private=True):
'''change collection privacy, if the user has permission
Parameters
@@ -345,32 +378,28 @@ def change_collection_privacy(request,cid,make_private=True):
@login_required
-def make_collection_private(request,cid):
+def make_collection_private(request, cid):
'''make collection private will make a collection private
Parameters
==========
cid: the collection id to make private
-
'''
return change_collection_privacy(request, cid, make_private=True)
@login_required
-def make_collection_public(request,cid):
+def make_collection_public(request, cid):
'''make collection public will make a collection public
Parameters
==========
cid: the collection id to make public
-
'''
- if PRIVATE_ONLY is True:
- messages.info(request,"This registry only allows private collections.")
+ if PRIVATE_ONLY:
+ messages.info(request, "This registry only allows private collections.")
return redirect('collection_details', cid=cid)
- return change_collection_privacy(request,cid,make_private=False)
-
-
+ return change_collection_privacy(request, cid, make_private=False)
################################################################################
@@ -386,7 +415,6 @@ def _edit_contributors(userids, collection, add_user=True, level="contributor"):
userids: a string list, or single string of a user id
add_user: if True, perform add on the collection. If False, remove.
level: one of contributor or owner.
-
'''
from shub.apps.users.utils import get_user
@@ -422,9 +450,6 @@ def edit_contributors(request, cid):
collection = get_collection(cid)
- # Who are current contributors?
- contributors = collection.contributors.all()
-
# Who are current owners?
owners = collection.owners.all()
diff --git a/shub/apps/main/views/compare.py b/shub/apps/main/views/compare.py
index 6dc19c2f..3b2aba6d 100644
--- a/shub/apps/main/views/compare.py
+++ b/shub/apps/main/views/compare.py
@@ -13,8 +13,6 @@
Collection
)
-from django.template import loader, Context
-from django.http import HttpResponse
from django.conf import settings
from django.shortcuts import (
render,
@@ -22,84 +20,93 @@
)
from django.contrib import messages
-from itertools import chain
import datetime
-import os
-import json
-import re
-import shutil
-###############################################################################################
-# FILE SYSTEM USAGE ###########################################################################
-###############################################################################################
+################################################################################
+# FILE SYSTEM USAGE ############################################################
+################################################################################
def generate_size_data(collections, collection_level):
'''generate a datastructure that can be rendered as:
- id,value
- flare,
- flare.analytics,
- flare.analytics.cluster,
- flare.analytics.cluster.AgglomerativeCluster,3938,1
- flare.analytics.cluster.CommunityStructure,3812,2
- flare.analytics.cluster.HierarchicalCluster,6714,3
- flare.analytics.cluster.MergeEdge,743,4
- flare.analytics.graph,,5
- flare.analytics.graph.BetweennessCentrality,3534,6
- flare.analytics.graph.LinkDistance,5731,7
- flare.analytics.graph.MaxFlowMinCut,7840,8
- flare.analytics.graph.ShortestPaths,5914,9
- flare.analytics.graph.SpanningTree,3416,10
+ id,value
+ flare,
+ flare.analytics,
+ flare.analytics.cluster,
+ flare.analytics.cluster.AgglomerativeCluster,3938,1
+ flare.analytics.cluster.CommunityStructure,3812,2
+ flare.analytics.cluster.HierarchicalCluster,6714,3
+ flare.analytics.cluster.MergeEdge,743,4
+ flare.analytics.graph,,5
+ flare.analytics.graph.BetweennessCentrality,3534,6
+ flare.analytics.graph.LinkDistance,5731,7
+ flare.analytics.graph.MaxFlowMinCut,7840,8
+ flare.analytics.graph.ShortestPaths,5914,9
+ flare.analytics.graph.SpanningTree,3416,10
'''
data = dict()
for collection in collections:
- if collection.name not in data:
- data[collection.name] = {}
+
+ collection_name = collection.name
+ if "/" in collection_name:
+ collection_name = collection_name.split('/')[0]
+
+ if collection_name not in data:
+ data[collection_name] = {}
# Generate data on the level of containers
if collection_level is False:
+
containers = collection.containers.all()
for container in containers:
- if container.name not in data[collection.name]:
- data[collection.name][container.name] = dict()
+
+ # Automated builds keep entire repo name under name
+ container_name = container.name
+ if "/" in container_name:
+ container_name = container_name.split('/')[1]
+
+ if container_name not in data[collection_name]:
+ data[collection_name][container_name] = dict()
if 'size_mb' in container.metadata:
- data[collection.name][container.name][container.tag] = {"size": container.metadata['size_mb'],
- "id": container.id }
+ data[collection_name][container_name][container.tag] = {"size": container.metadata['size_mb'],
+ "id": container.id}
+ elif "size_mb" in container.metrics:
+ data[collection_name][container_name][container.tag] = {"size": container.metrics['size_mb'],
+ "id": container.id}
+
# Generate data on the level of collections
else:
- data[collection.name] = {'size': collection.total_size(),
+ data[collection_name] = {'size': collection.total_size(),
'id': collection.id,
- 'n': collection.containers.count() }
+ 'n': collection.containers.count()}
return data
def get_filtered_collections(request):
'''return all collections or only public, given user accessing
- this function will return all collections based on a permission level
+ this function will return all collections based on a permission level
'''
private = True
if not request.user.is_anonymous:
if request.user.is_superuser or request.user.is_staff is True:
- private = True
+ private = True
if not private:
return Collection.objects.all()
return Collection.objects.filter(private=False)
-
### Treemap Views and Context
-
def generate_treemap_context(request):
collections = get_filtered_collections(request)
containers = Container.objects.filter(collection__in=collections)
date = datetime.datetime.now().strftime('%m-%d-%y')
return {"generation_date": date,
"containers_count": containers.count(),
- "collections_count": collections.count() }
+ "collections_count": collections.count()}
def containers_treemap(request):
@@ -108,30 +115,30 @@ def containers_treemap(request):
'''
context = generate_treemap_context(request)
if context['containers_count'] >= settings.VISUALIZATION_TREEMAP_COLLECTION_SWITCH:
- return collections_treemap(request,context)
+ return collections_treemap(request, context)
return render(request, "singularity/containers_treemap.html", context)
-def collections_treemap(request,context=None):
+def collections_treemap(request, context=None):
''' collection treemap shows total size of a collection'''
if context is None:
context = generate_treemap_context(request)
return render(request, "singularity/collections_treemap.html", context)
-def collection_treemap(request,cid):
+def collection_treemap(request, cid):
''' collection treemap shows size of containers across a single collection'''
try:
collection = Collection.objects.get(id=cid)
except Collection.DoesNotExist:
- messages.info(request,"This collection could not be found.")
+ messages.info(request, "This collection could not be found.")
return redirect("collections_treemap")
if not collection.has_view_permission(request):
- messages.info(request,"You don't have permission to view this collection.")
+ messages.info(request, "You don't have permission to view this collection.")
return redirect("collections_treemap")
- context = {'collection':collection,
+ context = {'collection': collection,
'generation_date': datetime.datetime.now().strftime('%m-%d-%y')}
return render(request, "singularity/collection_treemap.html", context)
@@ -144,7 +151,7 @@ def base_size_data(request, collection_level=False, collections=None):
if collections is None:
collections = get_filtered_collections(request)
collections = generate_size_data(collections, collection_level)
- return {'collections':collections}
+ return {'collections': collections}
def container_size_data(request):
context = base_size_data(request)
@@ -158,17 +165,17 @@ def collection_size_data(request):
return render(request, 'singularity/collection_size_data.csv', context)
-def single_collection_size_data(request,cid):
+def single_collection_size_data(request, cid):
''' generate size data for single collection treemap
'''
try:
collection = Collection.objects.get(id=cid)
except Collection.DoesNotExist:
- messages.info(request,"This collection could not be found.")
+ messages.info(request, "This collection could not be found.")
return redirect("collections_treemap")
if not collection.has_view_permission(request):
- messages.info(request,"You don't have permission to view this collection.")
+ messages.info(request, "You don't have permission to view this collection.")
return redirect("collections_treemap")
context = base_size_data(request,
diff --git a/shub/apps/main/views/containers.py b/shub/apps/main/views/containers.py
index 4a59b360..de4dd0de 100644
--- a/shub/apps/main/views/containers.py
+++ b/shub/apps/main/views/containers.py
@@ -19,14 +19,13 @@
)
from django.contrib.auth.decorators import login_required
-from django.http import HttpResponseRedirect
+from django.http import (
+ HttpResponseRedirect,
+ Http404
+)
from django.contrib import messages
from datetime import datetime
-import os
-import json
-import re
-
# get container
@@ -40,7 +39,6 @@ def get_container(cid):
return container
-
################################################################################
# HELPERS ######################################################################
################################################################################
@@ -49,11 +47,11 @@ def view_container(request, cid):
container = get_container(cid)
if not container.has_view_permission(request):
- messages.info(request,"This container is private.")
+ messages.info(request, "This container is private.")
return redirect('collections')
- messages.info(request,"We don't know what to do for this view yet, ideas?")
- return redirect('collection_details',cid=container.collection.id)
+ messages.info(request, "We don't know what to do for this view yet, ideas?")
+ return redirect('collection_details', cid=container.collection.id)
def view_named_container(request, collection, name, tag):
@@ -63,38 +61,39 @@ def view_named_container(request, collection, name, tag):
name=name,
tag=tag)
except Container.DoesNotExist:
- messages.info(request,"Container not found.")
+ messages.info(request, "Container not found.")
return redirect('collections')
- return container_details(request,container.id)
+ return container_details(request, container.id)
-def container_details(request,cid):
+def container_details(request, cid):
container = get_container(cid)
if not container.has_view_permission(request):
- messages.info(request,"This container is private.")
+ messages.info(request, "This container is private.")
return redirect('collections')
edit_permission = container.has_edit_permission(request)
labels = Label.objects.filter(containers=container)
- context = { "container":container,
- "labels":labels,
- "edit_permission": edit_permission }
+ context = {"container": container,
+ "labels": labels,
+ "edit_permission": edit_permission}
return render(request, 'containers/container_details.html', context)
+@login_required
def delete_container(request, cid):
'''delete a container, including it's corresponding files
'''
container = get_container(cid)
if not container.has_edit_permission(request):
- messages.info(request,"This action is not permitted.")
+ messages.info(request, "This action is not permitted.")
return redirect('collections')
container.delete()
- messages.info(request,'Container successfully deleted.')
+ messages.info(request, 'Container successfully deleted.')
return redirect(container.collection.get_absolute_url())
@@ -103,30 +102,30 @@ def container_tags(request, cid):
container = get_container(cid)
if not container.has_view_permission(request):
- messages.info(request,"This container is private.")
+ messages.info(request, "This container is private.")
return redirect('collections')
context = {"container": container}
return render(request, 'containers/container_tags.html', context)
-
-
-
-###############################################################################################
-# FREEZE ######################################################################################
-###############################################################################################
+################################################################################
+# FREEZE #######################################################################
+################################################################################
@login_required
-def change_freeze_status(request,cid):
+def change_freeze_status(request, cid):
'''freeze or unfreeze a container
- :param cid: the container to freeze or unfreeze
+
+ Parameters
+ ==========
+ cid: the container to freeze or unfreeze
'''
container = get_container(cid)
edit_permission = container.has_edit_permission(request)
- if edit_permission == True:
+ if edit_permission:
# If the container wasn't frozen, assign new version
# '2017-08-06T19:28:43.294175'
@@ -136,14 +135,14 @@ def change_freeze_status(request,cid):
container.frozen = not container.frozen
container.save()
message = "Container %s:%s be overwritten by new pushes." %(container.name,
- container.tag)
+ container.tag)
if container.frozen:
message = "%s:%s is frozen, and will not be overwritten by push." %(container.name,
container.tag)
messages.info(request, message)
else:
- messages.info(request,"You do not have permissions to perform this operation.")
+ messages.info(request, "You do not have permissions to perform this operation.")
previous_page = request.META.get('HTTP_REFERER', None)
if previous_page is not None:
diff --git a/shub/apps/main/views/download.py b/shub/apps/main/views/download.py
index 2b6007fd..1fb7f565 100644
--- a/shub/apps/main/views/download.py
+++ b/shub/apps/main/views/download.py
@@ -8,64 +8,50 @@
'''
-from shub.apps.main.models import (
- Container,
- Collection,
- Share,
- Star
-)
+from django.contrib import messages
+from django.http.response import Http404
-from django.shortcuts import (
- get_object_or_404,
- render_to_response,
- render,
- redirect
-)
+from django.shortcuts import redirect
from django.http import (
- JsonResponse,
HttpResponse,
FileResponse
)
-from shub.apps.main.utils import (
- calculate_expiration_date,
- validate_share
-)
+from shub.apps.main.models import Share
+from shub.apps.main.utils import validate_share
-from django.contrib.auth.decorators import login_required
-from django.contrib import messages
-from django.http.response import Http404
from rest_framework import status
from rest_framework.response import Response
import os
-import re
-import uuid
from .containers import get_container
-
-#######################################################################################
+################################################################################
# CONTAINER DOWNLOAD
-#######################################################################################
+################################################################################
-def download_recipe(request,cid):
+def download_recipe(request, cid):
'''download a container recipe
'''
container = get_container(cid)
+
if "deffile" in container.metadata:
recipe = container.metadata['deffile']
- filename = "Singularity.%s" %container.tag
+ filename = "Singularity.%s" % container.tag
response = HttpResponse(recipe,
content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename="%s"' %filename
return response
+ messages.info(request, "Container does not have recipe locally.")
+ return redirect(container.get_absolute_url())
+
-def download_share(request,cid,secret):
+def download_share(request, cid, secret):
'''download a custom share for a container
'''
container = get_container(cid)
@@ -86,7 +72,7 @@ def download_share(request,cid,secret):
if secret != share.secret:
raise Response(status.HTTP_401_UNAUTHORIZED)
- return _download_container(container)
+ return _download_container(container, request)
@@ -99,10 +85,10 @@ def download_container(request, cid, secret):
if container.collection.secret != secret:
raise Http404
- return _download_container(container)
+ return _download_container(container, request)
-def _download_container(container):
+def _download_container(container, request):
'''
download_container is the shared function between downloading a share
or a direct container download. For each, we create a FileResponse
@@ -114,13 +100,22 @@ def _download_container(container):
container: the container to download
'''
+ if container.image is not None:
- filename = container.get_download_name()
- filepath = container.image.get_abspath()
+ filename = container.get_download_name()
+ filepath = container.image.get_abspath()
- f = open(filepath, 'rb')
- response = FileResponse(f, content_type='application/img')
- response['Content-Disposition'] = 'attachment; filename="%s"' %filename
- response['Content-Length'] = os.path.getsize(filepath)
+ f = open(filepath, 'rb')
+ response = FileResponse(f, content_type='application/img')
+ response['Content-Disposition'] = 'attachment; filename="%s"' % filename
+ response['Content-Length'] = os.path.getsize(filepath)
- return response
+ return response
+
+    # A remote build will store a metadata image url
+ elif 'image' in container.metadata:
+ return redirect(container.metadata['image'])
+
+ else:
+ messages.info(request, "Container does not have image served locally.")
+ raise Http404
diff --git a/shub/apps/main/views/labels.py b/shub/apps/main/views/labels.py
index dfe08f4e..da84a56d 100644
--- a/shub/apps/main/views/labels.py
+++ b/shub/apps/main/views/labels.py
@@ -8,41 +8,24 @@
'''
-from shub.apps.main.models import (
- Container,
- Collection,
- Label,
- Star
-)
-
+from shub.apps.main.models import Label
+from django.http.response import Http404
+from django.contrib import messages
+from django.db.models import Count
from django.shortcuts import (
- get_object_or_404,
- render_to_response,
render,
redirect
)
from django.db.models import Q
-from django.http import (
- JsonResponse,
- HttpResponseRedirect
-)
-from django.http.response import Http404
-from django.contrib.auth.decorators import login_required
-from django.contrib import messages
-from django.db.models import Count
-
-import os
-import re
-import uuid
+from django.http import HttpResponseRedirect
-from .collections import get_collection
from django.urls import reverse
#### GETS #############################################################
-def get_label(key=None,value=None):
+def get_label(key=None, value=None):
keyargs = dict()
if key is not None:
@@ -62,51 +45,49 @@ def get_label(key=None,value=None):
def all_labels(request):
# Generate queryset of labels annotated with count based on key, eg {'key': 'maintainer', 'id__count': 1}
labels = Label.objects.values('key').annotate(Count("id")).order_by()
- context = {"labels":labels}
+ context = {"labels": labels}
return render(request, 'labels/all_labels.html', context)
-def view_label(request,lid):
+def view_label(request, lid):
'''view containers with a specific, exact key/pair'''
try:
label = Label.objects.get(id=lid)
except:
- messages.info(request,"This label does not exist.")
+ messages.info(request, "This label does not exist.")
return redirect('all_labels')
- context = {"label":label }
+ context = {"label": label}
return render(request, 'labels/view_label.html', context)
-def view_label_keyval(request,key,value):
+def view_label_keyval(request, key, value):
'''view containers with a specific, exact key/pair'''
try:
- label = Label.objects.get(key=key,
- value=value)
+ label = Label.objects.get(key=key, value=value)
except:
- messages.info(request,"This label does not exist.")
+ messages.info(request, "This label does not exist.")
return redirect('all_labels')
- url = reverse('view_label_id', kwargs={'lid': label.id })
+ url = reverse('view_label_id', kwargs={'lid': label.id})
return HttpResponseRedirect(url)
-def view_label_key(request,key):
+def view_label_key(request, key):
'''view all labels with a shared key'''
labels = Label.objects.filter(key=key)
- context = {"labels":labels,
- "key":key }
+ context = {"labels": labels, "key": key}
return render(request, 'labels/view_label_key.html', context)
-def update_container_labels(container,labels):
- for name,value in labels.items():
- if isinstance(value,str):
+def update_container_labels(container, labels):
+ for name, value in labels.items():
+ if isinstance(value, str):
value = value.lower()
- label,created = Label.objects.get_or_create(key=name.lower(),
- value=value)
+ label, _ = Label.objects.get_or_create(key=name.lower(),
+ value=value)
label.save()
diff --git a/shub/apps/main/views/share.py b/shub/apps/main/views/share.py
index e8d632d8..c2df3b86 100644
--- a/shub/apps/main/views/share.py
+++ b/shub/apps/main/views/share.py
@@ -8,59 +8,55 @@
'''
-from shub.apps.main.utils import calculate_expiration_date
-from shub.apps.api.tasks import expire_share
-from django.shortcuts import (
- render,
- reverse
-)
+from django.shortcuts import reverse
from django.contrib.auth.decorators import login_required
-from django.http import HttpResponseRedirect, JsonResponse
-from django.contrib import messages
+from django.http import JsonResponse
from datetime import datetime
-from notifications.signals import notify
-from shub.apps.users.utils import get_user
+from shub.apps.main.utils import calculate_expiration_date
+from shub.apps.main.views import get_container
+from shub.apps.api.tasks import expire_share
-import os
-import json
-import re
+import django_rq
@login_required
def generate_share(request, cid):
'''generate a temporary share link for a container
- :param cid: the container to generate a share link for
+
+ Parameters
+ ==========
+ cid: the container to generate a share link for
'''
container = get_container(cid)
edit_permission = container.has_edit_permission(request)
- if edit_permission == True:
+ if edit_permission:
days = request.POST.get('days', None)
if days is not None:
days = int(days)
try:
expire_date = calculate_expiration_date(days)
- share,created = Share.objects.get_or_create(container=container,
- expire_date=expire_date)
+ share, _ = Share.objects.get_or_create(container=container,
+ expire_date=expire_date)
share.save()
# Generate an expiration task
- expire_share.apply_async(kwargs={"sid": share.id},
- eta=expire_date)
+ django_rq.enqueue(expire_share, sid=share.id,
+ eta=expire_date)
link = reverse('download_share', kwargs={'cid':container.id,
- 'secret':share.secret })
+ 'secret':share.secret})
expire_date = datetime.strftime(expire_date, '%b %m, %Y')
response = {"status": "success",
"days": days,
"expire": expire_date,
- "link": link }
+ "link": link}
except:
response = {"status": "error",
- "days": days }
+ "days": days}
return JsonResponse(response)
diff --git a/shub/apps/main/views/stars.py b/shub/apps/main/views/stars.py
index 22950903..94d30a7e 100644
--- a/shub/apps/main/views/stars.py
+++ b/shub/apps/main/views/stars.py
@@ -9,37 +9,20 @@
'''
from shub.apps.main.models import (
- Container,
Collection,
Star
)
-from django.shortcuts import (
- get_object_or_404,
- render_to_response,
- render,
- redirect
-)
-
+from django.contrib.auth.decorators import login_required
+from django.shortcuts import render
from django.db.models.aggregates import Count
-from django.http import (
- JsonResponse,
- HttpResponseRedirect
-)
+from django.http import JsonResponse
from .collections import get_collection
-from django.http.response import Http404
-from django.contrib.auth.decorators import login_required
-from django.contrib import messages
-
-
-import os
-import re
-import uuid
-###############################################################################################
-# COLLECTIONS #################################################################################
-###############################################################################################
+################################################################################
+# COLLECTIONS ##################################################################
+################################################################################
def collection_stars(request):
'''This is a "favorite" view of collections ordered based on number of stars.
@@ -47,8 +30,8 @@ def collection_stars(request):
# Favorites based on stars
collections = Collection.objects.filter(private=False).annotate(Count('star', distinct=True)).order_by('-star__count')
- collections = [x for x in collections if x.star__count>0]
- context = {"collections": collections }
+ collections = [x for x in collections if x.star__count > 0]
+ context = {"collections": collections}
return render(request, 'stars/collection_stars.html', context)
@@ -57,9 +40,10 @@ def collection_downloads(request):
'''
from shub.apps.logs.models import APIRequestCount
- favorites = APIRequestCount.objects.filter(method="get",path__contains="ContainerDetailByName").order_by('-count')
+ favorites = APIRequestCount.objects.filter(method="get",
+ path__contains="ContainerDetailByName").order_by('-count')
- context = {"favorites": favorites }
+ context = {"favorites": favorites}
return render(request, 'stars/collection_downloads.html', context)
@@ -68,7 +52,7 @@ def collection_downloads(request):
#######################################################################################
@login_required
-def star_collection(request,cid):
+def star_collection(request, cid):
'''change favorite status of collection. If it's favorited, unfavorite by deleting
the star. If not, then create it.
'''
diff --git a/shub/apps/main/views/tags.py b/shub/apps/main/views/tags.py
index 12336f51..dad1443f 100644
--- a/shub/apps/main/views/tags.py
+++ b/shub/apps/main/views/tags.py
@@ -8,16 +8,11 @@
'''
-from shub.apps.main.models import (
- Container,
- Collection
-)
+from shub.apps.main.models import Container
from taggit.models import Tag
from django.shortcuts import (
- get_object_or_404,
- render_to_response,
render,
redirect
)
@@ -27,18 +22,9 @@
from django.contrib.auth.decorators import login_required
from django.contrib import messages
-import os
-import re
-import uuid
-
from .containers import get_container
-
-
-
-#### GETS #############################################################
-
-def get_tag(name=None,tid=None):
+def get_tag(name=None, tid=None):
keyargs = dict()
if name is not None:
@@ -54,9 +40,9 @@ def get_tag(name=None,tid=None):
return tag
-###############################################################################################
-# TAGS ########################################################################################
-###############################################################################################
+################################################################################
+# TAGS #########################################################################
+################################################################################
def all_tags(request):
@@ -66,34 +52,33 @@ def all_tags(request):
# View containers for a tag
-def view_tag(request,tid):
+def view_tag(request, tid):
try:
tag = Tag.objects.get(id=tid)
except:
- messages.info(request,"This tag does not exist.")
+ messages.info(request, "This tag does not exist.")
return redirect('all_tags')
containers = Container.objects.filter(tags__name=tag,
collection__private=False)
- context = {"containers":containers,
- "tag":tag }
+ context = {"containers": containers, "tag": tag}
return render(request, 'tags/view_tag.html', context)
-#######################################################################################
+################################################################################
# COLLECTION TAG MANAGEMENT
-#######################################################################################
+################################################################################
@login_required
-def add_tag(request,cid):
+def add_tag(request, cid):
'''manually add a tag to the collection
'''
container = get_container(cid)
edit_permission = container.collection.has_edit_permission(request)
if edit_permission and request.method == "POST":
- tag = request.POST.get("tag",None)
+ tag = request.POST.get("tag", None)
if tag is not None:
container.tags.add(tag.lower())
container.save()
@@ -105,7 +90,7 @@ def add_tag(request,cid):
@login_required
-def remove_tag(request,cid):
+def remove_tag(request, cid):
'''remove a tag from a collection
'''
container = get_container(cid)
@@ -114,8 +99,8 @@ def remove_tag(request,cid):
if edit_permission and request.method == "POST":
tag = request.POST.get("tag", None)
if tag is not None:
- tags = [x for x in container.tags.all() if x.name==tag.lower()]
- if len(tags) > 0:
+ tags = [x for x in container.tags.all() if x.name == tag.lower()]
+ if tags:
container.tags.remove(tag)
container.save()
diff --git a/shub/apps/singularity/templatetags/to_dash.py b/shub/apps/singularity/templatetags/to_dash.py
index 1849ea03..ec7a4de0 100644
--- a/shub/apps/singularity/templatetags/to_dash.py
+++ b/shub/apps/singularity/templatetags/to_dash.py
@@ -14,6 +14,6 @@
@register.filter
def to_dash(value):
- if isinstance(value,str):
- return value.replace(".","-")
+ if isinstance(value, str):
+ return value.replace(".", "-")
return value
diff --git a/shub/apps/singularity/templatetags/to_space.py b/shub/apps/singularity/templatetags/to_space.py
new file mode 100644
index 00000000..69a0af82
--- /dev/null
+++ b/shub/apps/singularity/templatetags/to_space.py
@@ -0,0 +1,19 @@
+'''
+
+Copyright (C) 2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django import template
+
+register = template.Library()
+
+@register.filter
+def to_space(value):
+ if isinstance(value, str):
+ return value.replace("_", " ")
+ return value
diff --git a/shub/apps/users/forms.py b/shub/apps/users/forms.py
index edb22193..f5064d2a 100644
--- a/shub/apps/users/forms.py
+++ b/shub/apps/users/forms.py
@@ -9,27 +9,14 @@
'''
from crispy_forms.layout import (
- Button,
- Field,
- HTML,
Layout,
Submit
)
-from crispy_forms.bootstrap import (
- AppendedText,
- FormActions,
- PrependedText,
- Tab,
- TabHolder
-)
-
-from crispy_forms.bootstrap import StrictButton
from crispy_forms.helper import FormHelper
from shub.apps.users.models import Team
from django.forms import ModelForm
-from django import forms
class TeamForm(ModelForm):
@@ -47,5 +34,4 @@ def __init__(self, *args, **kwargs):
super(TeamForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.layout = Layout()
- tab_holder = TabHolder()
self.helper.add_input(Submit("submit", "Save"))
diff --git a/shub/apps/users/management/commands/add_admin.py b/shub/apps/users/management/commands/add_admin.py
index 37fe5070..e5464333 100644
--- a/shub/apps/users/management/commands/add_admin.py
+++ b/shub/apps/users/management/commands/add_admin.py
@@ -15,7 +15,6 @@
from shub.apps.users.models import User
from shub.logger import bot
-import re
class Command(BaseCommand):
'''add admin will add admin and manager privs singularity
@@ -27,11 +26,11 @@ def add_arguments(self, parser):
parser.add_argument('--username', dest='username', default=None, type=str)
help = "Generates an admin for the registry."
- def handle(self,*args, **options):
+ def handle(self, *args, **options):
if options['username'] is None:
raise CommandError("Please provide a username with --username")
- bot.debug("Username: %s" %options['username'])
+ bot.debug("Username: %s" % options['username'])
try:
user = User.objects.get(username=options['username'])
diff --git a/shub/apps/users/management/commands/add_superuser.py b/shub/apps/users/management/commands/add_superuser.py
index 0a86915e..9cab5217 100644
--- a/shub/apps/users/management/commands/add_superuser.py
+++ b/shub/apps/users/management/commands/add_superuser.py
@@ -15,7 +15,6 @@
from shub.apps.users.models import User
from shub.logger import bot
-import re
class Command(BaseCommand):
'''add superuser will add admin and manager privs singularity
@@ -27,7 +26,7 @@ def add_arguments(self, parser):
parser.add_argument('--username', dest='username', default=None, type=str)
help = "Generates a superuser for the registry."
- def handle(self,*args, **options):
+ def handle(self, *args, **options):
if options['username'] is None:
raise CommandError("Please provide a username with --username")
diff --git a/shub/apps/users/management/commands/remove_admin.py b/shub/apps/users/management/commands/remove_admin.py
index 7f424248..d4e2d724 100644
--- a/shub/apps/users/management/commands/remove_admin.py
+++ b/shub/apps/users/management/commands/remove_admin.py
@@ -15,19 +15,18 @@
from shub.apps.users.models import User
from shub.logger import bot
-import re
class Command(BaseCommand):
'''remove admin will remove admin privs for a singularity
- registry. The super user is an admin that can build, delete,
- and manage images
+ registry. The super user is an admin that can build, delete,
+ and manage images
'''
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('--username', dest='username', default=None, type=str)
help = "Removes admin priviledges for the registry."
- def handle(self,*args, **options):
+ def handle(self, *args, **options):
if options['username'] is None:
raise CommandError("Please provide a username with --username")
diff --git a/shub/apps/users/management/commands/remove_superuser.py b/shub/apps/users/management/commands/remove_superuser.py
index 415a6650..81047a43 100644
--- a/shub/apps/users/management/commands/remove_superuser.py
+++ b/shub/apps/users/management/commands/remove_superuser.py
@@ -15,19 +15,18 @@
from shub.apps.users.models import User
from shub.logger import bot
-import re
class Command(BaseCommand):
'''remove superuser will remove admin privs for a singularity
- registry. The super user is an admin that can build, delete,
- and manage images
+ registry. The super user is an admin that can build, delete,
+ and manage images
'''
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('--username', dest='username', default=None, type=str)
help = "Removes superuser priviledges for the registry."
- def handle(self,*args, **options):
+ def handle(self, *args, **options):
if options['username'] is None:
raise CommandError("Please provide a username with --username")
diff --git a/shub/apps/users/models.py b/shub/apps/users/models.py
index c82cd7ab..8561542c 100644
--- a/shub/apps/users/models.py
+++ b/shub/apps/users/models.py
@@ -9,17 +9,20 @@
'''
from django.conf import settings
-from django.contrib.auth.models import Group
-from django.db.models.signals import ( post_save, pre_save )
-from django.dispatch import receiver
-from rest_framework.authtoken.models import Token
-from shub.apps.users.utils import get_usertoken
from django.contrib.auth.models import AbstractUser, BaseUserManager
+from django.db.models.signals import (
+ post_save,
+ pre_save
+)
+from django.dispatch import receiver
+
from django.urls import reverse
from django.db import models
+
+
+from rest_framework.authtoken.models import Token
+from shub.apps.users.utils import get_usertoken
from itertools import chain
-import datetime
-import re
import os
################################################################################
@@ -36,7 +39,7 @@ def get_image_path(instance, filename):
TEAM_TYPES = (('invite', 'Invite only. The user must be invited by an owner'),
- ('open','Open. Anyone can join the team without asking.'))
+ ('open', 'Open. Anyone can join the team without asking.'))
class CustomUserManager(BaseUserManager):
@@ -133,7 +136,7 @@ def get_credentials(self, provider):
except:
# Case 2: more than one credential for the provider
credential = self.social_auth.filter(provider=provider)
- if len(credential) > 0:
+ if credential:
return credential.last()
@@ -265,10 +268,10 @@ def has_member(self, username):
def __str__(self):
- return "%s" %(self.name)
+ return "%s" % self.name
def __unicode__(self):
- return "%s" %(self.name)
+ return "%s" % self.name
def get_label(self):
return "users"
@@ -289,7 +292,7 @@ def __str__(self):
return "<%s:%s>" %(self.id, self.team.name)
def __unicode__(self):
- return "<%s:%s>" %(self.id,self.team.name)
+ return "<%s:%s>" %(self.id, self.team.name)
def get_label(self):
return "users"
@@ -303,8 +306,7 @@ def get_url(self):
class Meta:
app_label = 'users'
- unique_together = (("code", "team"),)
-
+ unique_together = (("code", "team"),)
################################################################################
@@ -313,10 +315,10 @@ class Meta:
@receiver(pre_save, sender=Team)
-def create_team_group(sender, instance, **kwargs):
+def create_team_group(sender, instance, **kwargs):
# Get the name from the team
- name = instance.name.replace(' ','-').lower().strip()
+ name = instance.name.replace(' ', '-').lower().strip()
instance.name = name
diff --git a/shub/apps/users/permissions.py b/shub/apps/users/permissions.py
index 9cbbff77..f0b0aebb 100644
--- a/shub/apps/users/permissions.py
+++ b/shub/apps/users/permissions.py
@@ -8,20 +8,28 @@
'''
-from shub.settings import USER_COLLECTIONS
-
+from shub.settings import (
+ USER_COLLECTIONS,
+ USER_COLLECTION_LIMIT
+)
def has_create_permission(request):
''' determine if a user can create a team.
1. superusers and admin (global) can.
2. If user collections is True, users can create teams
-
- '''
+ '''
if request.user.is_superuser or request.user.is_staff:
return True
+
if USER_COLLECTIONS is True and not request.user.is_anonymous:
+
+ # Does the registry have a user collection limit?
+ if USER_COLLECTION_LIMIT is not None:
+ if request.user.container_collection_owners.count() >= USER_COLLECTION_LIMIT:
+ return False
return True
+
return False
diff --git a/shub/apps/users/urls/social.py b/shub/apps/users/urls/social.py
index 0899d15a..585c3308 100644
--- a/shub/apps/users/urls/social.py
+++ b/shub/apps/users/urls/social.py
@@ -9,7 +9,6 @@
'''
from django.conf.urls import url, include
-from django.conf import settings
import shub.apps.users.views as user_views
from social_django import urls as social_urls
diff --git a/shub/apps/users/urls/teams.py b/shub/apps/users/urls/teams.py
index 9dd06efa..b4122ccb 100644
--- a/shub/apps/users/urls/teams.py
+++ b/shub/apps/users/urls/teams.py
@@ -8,8 +8,7 @@
'''
-from django.conf.urls import url, include
-from django.conf import settings
+from django.conf.urls import url
import shub.apps.users.views as views
urlpatterns = [
@@ -29,7 +28,6 @@
# Add members and owners
url(r'^teams/(?P<tid>.+?)/add/owner/(?P<uid>.+?)$', views.add_owner, name="remove_owner"),
-
- url(r'^teams/new$',views.edit_team,name='new_team'),
+ url(r'^teams/new$', views.edit_team, name='new_team'),
]
diff --git a/shub/apps/users/urls/users.py b/shub/apps/users/urls/users.py
index cfc8276e..3df54ac3 100644
--- a/shub/apps/users/urls/users.py
+++ b/shub/apps/users/urls/users.py
@@ -18,6 +18,4 @@
url(r'^token', views.view_token, name="token"),
url(r'^u/profile', views.view_profile, name="profile"),
#url(r'^(?P<username>[A-Za-z0-9@/./+/-/_]+)/$',views.view_profile,name="profile"),
-
]
-
diff --git a/shub/apps/users/utils.py b/shub/apps/users/utils.py
index f852fed4..8dfad071 100644
--- a/shub/apps/users/utils.py
+++ b/shub/apps/users/utils.py
@@ -21,7 +21,6 @@ def get_user(uid):
Parameters
==========
uid: the id of the user
-
'''
from shub.apps.users.models import User
keyargs = {'id':uid}
@@ -36,7 +35,7 @@ def get_user(uid):
def get_usertoken(user):
try:
token = Token.objects.get(user=user)
- except TokenDoesNotExist:
+ except Token.DoesNotExist:
token = Token.objects.create(user=user)
return token.key
@@ -70,9 +69,12 @@ def create_code_challenge():
def basic_auth_header(username, password):
'''return a base64 encoded header object to
- generate a token
- :param username: the username
- :param password: the password
+ generate a token
+
+ Parameters
+ ==========
+ username: the username
+ password: the password
'''
s = "%s:%s" % (username, password)
if sys.version_info[0] >= 3:
@@ -82,4 +84,3 @@ def basic_auth_header(username, password):
credentials = base64.b64encode(s)
auth = {"Authorization": "Basic %s" % credentials}
return auth
-
diff --git a/shub/apps/users/views/auth.py b/shub/apps/users/views/auth.py
index 2912d0f6..2c7d6d3a 100644
--- a/shub/apps/users/views/auth.py
+++ b/shub/apps/users/views/auth.py
@@ -13,33 +13,35 @@
render
)
+from django.contrib import messages
+from django.utils import timezone
from django.http import JsonResponse
from django.contrib.auth import logout as auth_logout
-from shub.apps.users.models import User
from django.contrib.auth.decorators import login_required
-from datetime import datetime
-from django.utils import timezone
-#######################################################################################
+################################################################################
# AUTHENTICATION
-#######################################################################################
+################################################################################
-def validate_credentials(user,context=None):
+def validate_credentials(user, context=None):
'''validate_credentials will return a context object with "aok" for each credential
- that exists, and "None" if it does not for a given user
- :param user: the user to check, should have social_auth
- :param context: an optional context object to append to
+ that exists, and "None" if it does not for a given user
+
+ Parameters
+ ==========
+ user: the user to check, should have social_auth
+ context: an optional context object to append to
'''
- if context == None:
+ if context is None:
context = dict()
# Right now we have github for repos and google for storage
- credentials = [{'provider':'google-oauth2','key':'google_credentials'},
- {'provider':'github','key':'github_credentials'},
- {'provider':'globus','key':'globus_credentials'},
- {'provider':'twitter','key':'twitter_credentials'}]
+ credentials = [{'provider':'google-oauth2', 'key':'google_credentials'},
+ {'provider':'github', 'key':'github_credentials'},
+ {'provider':'globus', 'key':'globus_credentials'},
+ {'provider':'twitter', 'key':'twitter_credentials'}]
# Iterate through credentials, and set each available to aok. This is how
# the templates will know to tell users which they need to add, etc.
@@ -48,7 +50,7 @@ def validate_credentials(user,context=None):
credential = None
if not user.is_anonymous:
credential = user.get_credentials(provider=group['provider'])
- if credential != None:
+ if credential is not None:
context[group['key']] = 'aok'
else:
credentials_missing = None
@@ -64,27 +66,27 @@ def agree_terms(request):
request.user.agree_terms = True
request.user.agree_terms_date = timezone.now()
request.user.save()
- response_data = {'status': request.user.agree_terms }
+ response_data = {'status': request.user.agree_terms}
return JsonResponse(response_data)
return JsonResponse({"Unicorn poop cookies...": "I will never understand the allure."})
-def login(request,message=None):
+def login(request, message=None):
'''login will either show the user a button to login with github, and then a link
- to their collections (given storage is set up) or a link to connect storage (if it
- isn't)
+ to their collections (given storage is set up) or a link to connect storage (if it
+ isn't)
'''
if message is not None:
- messages.info(message)
+ messages.info(request, message)
- context=None
+ context = None
if request.user.is_authenticated:
if not request.user.agree_terms:
- return render(request,'terms/usage_agreement_login.html', context)
+ return render(request, 'terms/usage_agreement_login.html', context)
context = validate_credentials(user=request.user)
- return render(request,'social/login.html', context)
+ return render(request, 'social/login.html', context)
@login_required
@@ -117,15 +119,14 @@ def redirect_if_no_refresh_token(backend, response, social, *args, **kwargs):
def social_user(backend, uid, user=None, *args, **kwargs):
'''OVERRIDED: It will give the user an error message if the
- account is already associated with a username.'''
+ account is already associated with a username.'''
provider = backend.name
social = backend.strategy.storage.user.get_social_auth(provider, uid)
if social:
if user and social.user != user:
msg = 'This {0} account is already in use.'.format(provider)
- return login(request=backend.strategy.request,
- message=msg)
+ return login(request=backend.strategy.request, message=msg)
#raise AuthAlreadyAssociated(backend, msg)
elif not user:
user = social.user
diff --git a/shub/apps/users/views/teams.py b/shub/apps/users/views/teams.py
index 8676b7dd..d0605691 100644
--- a/shub/apps/users/views/teams.py
+++ b/shub/apps/users/views/teams.py
@@ -11,14 +11,17 @@
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, redirect
+from django.http.response import Http404
from django.http import (
HttpResponseRedirect,
JsonResponse
)
-from shub.settings import USER_COLLECTIONS
from shub.apps.users.forms import TeamForm
-from shub.apps.users.models import ( User, Team, MembershipInvite )
+from shub.apps.users.models import (
+ Team,
+ MembershipInvite
+)
from shub.apps.users.permissions import (
has_create_permission,
is_invite_valid
@@ -54,9 +57,11 @@ def get_team(tid):
@login_required
def edit_team(request, tid=None):
'''edit_team is the view to edit an existing team, or create a new team.
- :parma tid: the team id to edit or create. If none, indicates a new team
+
+ Parameters
+ ==========
+ tid: the team id to edit or create. If none, indicates a new team
'''
-
if tid:
team = get_team(tid)
edit_permission = team.has_edit_permission(request)
@@ -109,7 +114,7 @@ def view_teams(request):
create_permission = has_create_permission(request)
context = {"teams": teams,
- "has_create_permission" : create_permission }
+ "has_create_permission" : create_permission}
return render(request, "teams/all_teams.html", context)
@@ -166,13 +171,12 @@ def join_team(request, tid, code=None):
messages.info(request, "This code is invalid to join this team.")
if add_user:
-
if user not in team.get_members():
team.members.add(user)
team.save()
- messages.info(request,"You have been added to team %s" %(team.name))
+ messages.info(request, "You have been added to team %s" % team.name)
else:
- messages.info(request,"You are already a member of %s" %(team.name))
+ messages.info(request, "You are already a member of %s" % team.name)
return HttpResponseRedirect(team.get_absolute_url())
@@ -306,7 +310,7 @@ def delete_team(request, tid):
team = get_team(tid)
if request.user in team.owners.all():
- messages.info(request,'%s has been deleted.' %team.name)
+ messages.info(request, '%s has been deleted.' % team.name)
team.delete()
else:
messages.info(request, "You are not allowed to perform this action.")
diff --git a/shub/apps/users/views/users.py b/shub/apps/users/views/users.py
index bb393385..775dabc7 100644
--- a/shub/apps/users/views/users.py
+++ b/shub/apps/users/views/users.py
@@ -11,14 +11,19 @@
from shub.apps.users.models import User
from shub.apps.main.models import Collection, Star
from shub.apps.logs.models import APIRequestCount
+
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models.aggregates import Count
-from django.shortcuts import render, redirect
-from shub.settings import USER_COLLECTIONS
+from django.shortcuts import (
+ get_object_or_404,
+ render,
+ redirect
+)
from django.db.models import Q, Sum
+
@login_required
def view_token(request):
''' tokens are valid for pushing (creating collections) and only available
diff --git a/shub/celery.py b/shub/celery.py
deleted file mode 100644
index 9a7a9fae..00000000
--- a/shub/celery.py
+++ /dev/null
@@ -1,39 +0,0 @@
-'''
-
-Copyright (C) 2017-2019 Vanessa Sochat.
-
-This Source Code Form is subject to the terms of the
-Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
-with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-'''
-
-from __future__ import absolute_import
-import os
-from celery import Celery
-from django.conf import settings
-
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shub.settings')
-shubcelery = Celery('shub')
-
-# Using a string here means the worker will not have to
-# pickle the object when using Windows.
-shubcelery.config_from_object('django.conf:settings')
-shubcelery.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
-
-from opbeat.contrib.django.models import (
- client,
- logger,
- register_handlers
-)
-
-from opbeat.contrib.celery import register_signal
-
-try:
- register_signal(client)
-
-except Exception as e:
- logger.exception('Failed installing celery hook: %s' % e)
-
-if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
- register_handlers()
diff --git a/shub/logger.py b/shub/logger.py
index 88c94191..ea7060d8 100644
--- a/shub/logger.py
+++ b/shub/logger.py
@@ -25,7 +25,7 @@
class ShubMessage:
- def __init__(self,MESSAGELEVEL=None):
+ def __init__(self, MESSAGELEVEL=None):
self.level = get_logging_level()
self.history = []
self.errorStream = sys.stderr
@@ -43,12 +43,13 @@ def __init__(self,MESSAGELEVEL=None):
def useColor(self):
'''useColor will determine if color should be added
- to a print. Will check if being run in a terminal, and
- if has support for asci'''
+ to a print. Will check if being run in a terminal, and
+ if has support for ascii
+ '''
COLORIZE = get_user_color_preference()
if COLORIZE is not None:
return COLORIZE
- streams = [self.errorStream,self.outputStream]
+ streams = [self.errorStream, self.outputStream]
for stream in streams:
if not hasattr(stream, 'isatty'):
return False
@@ -57,9 +58,10 @@ def useColor(self):
return True
- def addColor(self,level,text):
+ def addColor(self, level, text):
'''addColor to the prompt (usually prefix) if terminal
- supports, and specified to do so'''
+ supports, and specified to do so
+ '''
if self.colorize:
if level in self.colors:
text = "%s%s%s" %(self.colors[level],
@@ -68,7 +70,7 @@ def addColor(self,level,text):
return text
- def emitError(self,level):
+ def emitError(self, level):
'''determine if a level should print to
stderr, includes all levels but INFO and QUIET'''
if level in [ABRT,
@@ -78,21 +80,20 @@ def emitError(self,level):
VERBOSE1,
VERBOSE2,
VERBOSE3,
- DEBUG ]:
+ DEBUG]:
return True
return False
- def emitOutput(self,level):
+ def emitOutput(self, level):
'''determine if a level should print to stdout
only includes INFO'''
- if level in [LOG,
- INFO]:
+ if level in [LOG, INFO]:
return True
return False
- def isEnabledFor(self,messageLevel):
+ def isEnabledFor(self, messageLevel):
'''check if a messageLevel is enabled to emit a level
'''
if messageLevel <= self.level:
@@ -100,25 +101,28 @@ def isEnabledFor(self,messageLevel):
return False
- def emit(self,level,message,prefix=None):
+ def emit(self, level, message, prefix=None):
'''emit is the main function to print the message
- optionally with a prefix
- :param level: the level of the message
- :param message: the message to print
- :param prefix: a prefix for the message
+ optionally with a prefix
+
+ Parameters
+ ==========
+ level: the level of the message
+ message: the message to print
+ prefix: a prefix for the message
'''
if prefix is not None:
- prefix = self.addColor(level,"%s " %(prefix))
+ prefix = self.addColor(level, "%s " % prefix)
else:
prefix = ""
- message = self.addColor(level,message)
+ message = self.addColor(level, message)
# Add the prefix
- message = "%s%s" %(prefix,message)
+ message = "%s%s" %(prefix, message)
if not message.endswith('\n'):
- message = "%s\n" %message
+ message = "%s\n" % message
# If the level is quiet, only print to error
if self.level == QUIET:
@@ -127,26 +131,26 @@ def emit(self,level,message,prefix=None):
# Otherwise if in range print to stdout and stderr
elif self.isEnabledFor(level):
if self.emitError(level):
- self.write(self.errorStream,message)
+ self.write(self.errorStream, message)
else:
- self.write(self.outputStream,message)
+ self.write(self.outputStream, message)
# Add all log messages to history
self.history.append(message)
- def write(self,stream,message):
+ def write(self, stream, message):
'''write will write a message to a stream,
- first checking the encoding
+ first checking the encoding
'''
- if isinstance(message,bytes):
+ if isinstance(message, bytes):
message = message.decode('utf-8')
stream.write(message)
- def get_logs(self,join_newline=True):
+ def get_logs(self, join_newline=True):
''''get_logs will return the complete history, joined by newline
- (default) or as is.
+ (default) or as is.
'''
if join_newline:
return '\n'.join(self.history)
@@ -154,12 +158,21 @@ def get_logs(self,join_newline=True):
- def show_progress(self,iteration,total,length=40,min_level=0,prefix=None,
- carriage_return=True,suffix=None,symbol=None):
+ def show_progress(self, iteration,
+ total,
+ length=40,
+ min_level=0,
+ prefix=None,
+ carriage_return=True,
+ suffix=None,
+ symbol=None):
'''create a terminal progress bar, default bar shows for verbose+
- :param iteration: current iteration (Int)
- :param total: total iterations (Int)
- :param length: character length of bar (Int)
+
+ Parameters
+ ==========
+ iteration: current iteration (Int)
+ total: total iterations (Int)
+ length: character length of bar (Int)
'''
percent = 100 * (iteration / float(total))
progress = int(length * iteration // total)
@@ -188,45 +201,48 @@ def show_progress(self,iteration,total,length=40,min_level=0,prefix=None,
if self.level > min_level:
percent = "%5s" %("{0:.1f}").format(percent)
output = '\r' + prefix + " |%s| %s%s %s" % (bar, percent, '%', suffix)
- sys.stdout.write(output),
+ sys.stdout.write(output)
if iteration == total and carriage_return:
sys.stdout.write('\n')
sys.stdout.flush()
+ def abort(self, message):
+ self.emit(ABRT, message, 'ABRT')
- def abort(self,message):
- self.emit(ABRT,message,'ABRT')
+ def error(self, message):
+ self.emit(ERROR, message, 'ERROR')
- def error(self,message):
- self.emit(ERROR,message,'ERROR')
+ def exit(self, message, return_code=1):
+ self.emit(ERROR, message, 'ERROR')
+ sys.exit(return_code)
- def warning(self,message):
- self.emit(WARNING,message,'WARNING')
+ def warning(self, message):
+ self.emit(WARNING, message, 'WARNING')
- def log(self,message):
- self.emit(LOG,message,'LOG')
+ def log(self, message):
+ self.emit(LOG, message, 'LOG')
- def info(self,message):
- self.emit(INFO,message)
+ def info(self, message):
+ self.emit(INFO, message)
def newline(self):
- self.write(self.outputStream,'\n')
+ self.write(self.outputStream, '\n')
- def verbose(self,message):
- self.emit(VERBOSE,message,"VERBOSE")
+ def verbose(self, message):
+ self.emit(VERBOSE, message, "VERBOSE")
- def verbose1(self,message):
- self.emit(VERBOSE,message,"VERBOSE1")
+ def verbose1(self, message):
+ self.emit(VERBOSE, message, "VERBOSE1")
- def verbose2(self,message):
- self.emit(VERBOSE2,message,'VERBOSE2')
+ def verbose2(self, message):
+ self.emit(VERBOSE2, message, 'VERBOSE2')
- def verbose3(self,message):
- self.emit(VERBOSE3,message,'VERBOSE3')
+ def verbose3(self, message):
+ self.emit(VERBOSE3, message, 'VERBOSE3')
- def debug(self,message):
- self.emit(DEBUG,message,'DEBUG')
+ def debug(self, message):
+ self.emit(DEBUG, message, 'DEBUG')
def is_quiet(self):
'''is_quiet returns true if the level is under 1
@@ -238,26 +254,27 @@ def is_quiet(self):
def get_logging_level():
'''get_logging_level will configure a logging to standard out based on the user's
- selected level, which should be in an environment variable called
- SOM_MESSAGELEVEL. if SOM_MESSAGELEVEL is not set, the maximum level
- (5) is assumed (all messages).
+ selected level, which should be in an environment variable called
+ SREGISTRY_MESSAGELEVEL. if SOM_MESSAGELEVEL is not set, the maximum level
+ (5) is assumed (all messages).
'''
- return int(os.environ.get("SOM_MESSAGELEVEL",5))
+ return int(os.environ.get("SREGISTRY_MESSAGELEVEL", 5))
def get_user_color_preference():
- COLORIZE = os.environ.get('SOM_COLORIZE',None)
+ '''see in the environment if the user wants to disable colored logging'''
+ COLORIZE = os.environ.get('SREGISTRY_COLORIZE', None)
if COLORIZE is not None:
COLORIZE = convert2boolean(COLORIZE)
return COLORIZE
def convert2boolean(arg):
- '''convert2boolean is used for environmental variables that must be
- returned as boolean'''
- if not isinstance(arg,bool):
- return arg.lower() in ("yes", "true", "t", "1","y")
- return arg
-
+ '''convert2boolean is used for environmental variables that must be
+ returned as boolean
+ '''
+ if not isinstance(arg, bool):
+ return arg.lower() in ("yes", "true", "t", "1", "y")
+ return arg
bot = ShubMessage()
diff --git a/shub/plugins/globus/actions.py b/shub/plugins/globus/actions.py
index f2e46fb3..28633ae3 100644
--- a/shub/plugins/globus/actions.py
+++ b/shub/plugins/globus/actions.py
@@ -9,15 +9,8 @@
'''
-from shub.apps.users.models import User
-from django.contrib.auth.decorators import login_required
-from social_django.models import UserSocialAuth
-from shub.plugins.globus.utils import (
- get_client,
- get_transfer_client
-)
from django.conf import settings
-import requests
+from shub.plugins.globus.utils import get_transfer_client
import globus_sdk
@@ -61,7 +54,7 @@ def do_transfer(user, endpoint, container):
endpoint,
label="Singularity Registry Transfer",
sync_level="checksum")
- tdata.add_item(source.replace(settings.MEDIA_ROOT,'/code/images'),
- source.replace(settings.MEDIA_ROOT,'').strip('/'))
+ tdata.add_item(source.replace(settings.MEDIA_ROOT, '/code/images'),
+ source.replace(settings.MEDIA_ROOT, '').strip('/'))
transfer_result = client.submit_transfer(tdata)
return transfer_result
diff --git a/shub/plugins/globus/decorators.py b/shub/plugins/globus/decorators.py
index aedd4ada..3febdbb1 100644
--- a/shub/plugins/globus/decorators.py
+++ b/shub/plugins/globus/decorators.py
@@ -8,10 +8,8 @@
'''
-from django.conf import settings
-from django.shortcuts import redirect, render
+from django.shortcuts import redirect
from django.contrib import messages
-from django.http import JsonResponse
def has_globus_association(function):
diff --git a/shub/plugins/globus/urls.py b/shub/plugins/globus/urls.py
index 77b62eab..a561b9fd 100644
--- a/shub/plugins/globus/urls.py
+++ b/shub/plugins/globus/urls.py
@@ -8,5 +8,5 @@
url(r'^endpoint/(?P<endpoint_id>.+?)/(?P<cid>\d+)?$', views.globus_endpoint, name='globus_endpoint'),
url(r'^endpoint/(?P<endpoint_id>\d+)/?$', views.globus_endpoint, name='globus_endpoint'),
url(r'^transfer/(?P<cid>\d+)/?$', views.globus_transfer, name='globus_transfer'),
- url(r'^transfer/(?P<endpoint>.+?)/container/(?P<cid>\d+)/?$', views.submit_transfer,name='submit_transfer'),
+ url(r'^transfer/(?P<endpoint>.+?)/container/(?P<cid>\d+)/?$', views.submit_transfer, name='submit_transfer')
]
diff --git a/shub/plugins/globus/utils.py b/shub/plugins/globus/utils.py
index d4869270..09916d16 100644
--- a/shub/plugins/globus/utils.py
+++ b/shub/plugins/globus/utils.py
@@ -8,13 +8,9 @@
'''
-import pickle
-from shub.apps.users.models import User
+from django.conf import settings
from social_django.models import UserSocialAuth
-from django.contrib.auth import login
-import requests
import globus_sdk
-from django.conf import settings
def get_client():
'''return client to handle authentication'''
@@ -62,8 +58,7 @@ def associate_user(user, client, code):
# Associate with the user account
token_id = tokens.decode_id_token(client)
- # Look up the user based on email
- email = token_id['email']
+ # Look up the user based on token
try:
social = user.social_auth.get(provider="globus",
uid=token_id["sub"])
diff --git a/shub/plugins/globus/views.py b/shub/plugins/globus/views.py
index 3ebf013f..da6c010a 100644
--- a/shub/plugins/globus/views.py
+++ b/shub/plugins/globus/views.py
@@ -9,12 +9,9 @@
'''
from django.conf import settings
-from shub.logger import bot
-from shub.apps.main.views import get_container
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.urls import reverse
-from django.contrib import messages
from django.http import JsonResponse
from .actions import (
get_endpoints,
@@ -22,12 +19,14 @@
search_endpoints
)
from .decorators import has_globus_association
+from shub.apps.main.views import get_container
from shub.plugins.globus.utils import (
get_client,
get_transfer_client,
associate_user
)
+from social_django.models import UserSocialAuth
from globus_sdk.exc import TransferAPIError
@@ -44,7 +43,7 @@ def globus_logout(request):
# Properly revoke and log out
social = request.user.social_auth.get(provider="globus")
- for resource, token_info in social.extra_data.items():
+ for _, token_info in social.extra_data.items():
for token, token_type in token_info.items():
client.oauth2_revoke_token(
token, additional_params={'token_type_hint': token_type})
@@ -88,9 +87,9 @@ def globus_login(request):
# Second step of authentication flow - we need to ask for token
code = request.GET.get('code')
- user = associate_user(request.user,
- client=client,
- code=code)
+ associate_user(request.user,
+ client=client,
+ code=code)
return redirect('globus_transfer')
@@ -109,7 +108,7 @@ def globus_transfer(request, cid=None, endpoints=None):
context = {'user': request.user,
'container': container,
- 'endpoint_search_term': "Search for..." }
+ 'endpoint_search_term': "Search for..."}
# Does the user want to search endpoints?
@@ -140,12 +139,12 @@ def globus_endpoint(request, endpoint_id=None, cid=None):
context = {'user': request.user,
'container': container,
- 'endpoint_search_term': "Search for..." }
+ 'endpoint_search_term': "Search for..."}
# Get the endpoint
try:
client = get_transfer_client(request.user)
- endpoints = [client.get_endpoint(endpoint_id).data]
+ endpoints = [client.get_endpoint(endpoint_id).data]
except TransferAPIError:
endpoints = get_endpoints(request.user)
@@ -163,7 +162,7 @@ def submit_transfer(request, endpoint, cid):
container = get_container(cid)
if container is None:
- message = "This container could not be found."
+ m = "This container could not be found."
else:
result = do_transfer(user=request.user,
@@ -171,9 +170,9 @@ def submit_transfer(request, endpoint, cid):
container=container)
- link = "https://globus.org/app/activity/%s" %result['task_id']
+ link = "https://globus.org/app/activity/%s" % result['task_id']
m = result['message']
- m = "%s: view task" %(m, link)
+    m = "%s: <a href='%s' target='_blank'>view task</a>" % (m, link)
- status = {'message': m }
+ status = {'message': m}
return JsonResponse(status)
diff --git a/shub/plugins/google_build/__init__.py b/shub/plugins/google_build/__init__.py
new file mode 100644
index 00000000..93e7c961
--- /dev/null
+++ b/shub/plugins/google_build/__init__.py
@@ -0,0 +1,9 @@
+from django.conf import settings
+from shub.logger import bot
+
+# Ensure that application credentials exist
+for required in ['GOOGLE_APPLICATION_CREDENTIALS',
+ 'SREGISTRY_GOOGLE_PROJECT',
+ 'SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS']:
+ if not hasattr(settings, required):
+ bot.exit('%s not defined in secrets.' % required)
diff --git a/shub/plugins/google_build/actions.py b/shub/plugins/google_build/actions.py
new file mode 100644
index 00000000..f781dc1b
--- /dev/null
+++ b/shub/plugins/google_build/actions.py
@@ -0,0 +1,423 @@
+'''
+
+Copyright (C) 2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from dateutil.parser import parse
+from django.conf import settings
+from django.core.exceptions import ObjectDoesNotExist
+from django.urls import reverse
+from shub.logger import bot
+from shub.apps.main.models import Container, Collection
+from sregistry.main.google_build.client import get_client
+from datetime import (
+ datetime,
+ timedelta
+)
+from sregistry.utils import get_recipe_tag
+from .utils import (
+ convert_size,
+ clear_container_payload,
+ create_container_payload,
+ JsonResponseMessage,
+ generate_jwt_token
+)
+import os
+import django_rq
+
+
+def trigger_build(sender, instance, **kwargs):
+ '''Trigger build will send a recipe directly to Google Cloud Build,
+ and create a container that will send a curl response back to
+ an endpoint here to signal that the build is complete.
+ Triggered by RecipePushSerializer.
+
+ Parameters
+ ==========
+ sender: should be the sending model, which is an RecipeFile instance
+ instance: is the instance of the RecipeFile
+ '''
+ collection = Collection.objects.get(name=instance.collection)
+ context = get_build_context()
+
+ print("IN TRIGGER BUILD")
+
+ # Instantiate client with context (connects to buckets)
+ client = get_client(debug=True, **context)
+
+ # Assemble the name
+ name = "%s/%s:%s" %(instance.collection, instance.name, instance.tag)
+
+ # The recipe needs to be in PWD to create the build package
+ recipe = instance.datafile.name
+ working_dir = os.path.dirname(recipe)
+
+ # Create a container (with status google-build) for the user to watch
+ try:
+ container = collection.containers.get(tag=instance.tag,
+ name=instance.name)
+
+ except ObjectDoesNotExist:
+ container = Container.objects.create(collection=collection,
+ tag=instance.tag,
+ name=instance.name)
+
+ # If it's frozen, don't submit
+ if container.frozen:
+ return JsonResponseMessage(message="Container is frozen.")
+
+ # Webhook response
+ webhook = "%s%s" % (settings.DOMAIN_NAME,
+ reverse('receive_build', kwargs={"cid": container.id}))
+
+ # Generate a one time use secret for jwt web token
+ container.metadata['builder'] = {"name": "google_build"}
+
+ payload = create_container_payload(container) # does not save
+
+ # Generate the jwt token
+ jwt_token = generate_jwt_token(secret=container.metadata['builder']['secret'],
+ payload=payload)
+
+ # Submit the build
+ response = client.build(name,
+ recipe=recipe,
+ working_dir=working_dir,
+ headless=True,
+ webhook=webhook,
+ extra_data={"token": jwt_token})
+
+    # Add the metadata first: a new container has no 'build_metadata' key yet
+    container.metadata['build_metadata'] = response['metadata']
+
+    # Update the status for the container
+    if "status" in response:
+        container.metadata['build_metadata']['build']['status'] = response["status"]
+    container.save()
+
+ print(response)
+ return JsonResponseMessage(message="Build received.")
+
+
+def receive_build(collection, recipes, branch):
+ '''receive_build will receive a build from GitHub, and then trigger
+ the same Google Cloud Build but using a GitHub repository (recommended).
+
+ Parameters
+ ==========
+ collection: the collection
+ recipes: a dictionary of modified recipe files to build
+ branch: the repository branch (kept as metadata)
+ '''
+ from .github import get_auth_token
+ context = get_build_context()
+
+ # Instantiate client with context (connects to buckets)
+ client = get_client(debug=True, **context)
+
+ print("RECIPES: %s" % recipes)
+
+ # Derive tag from the recipe, or default to latest
+ for recipe, metadata in recipes.items():
+
+ # First preference to command line, then recipe tag
+ tag = get_recipe_tag(recipe) or "latest"
+
+ # Get a container, if it exists, we've already written file here
+ try:
+ container = collection.containers.get(tag=tag)
+        except ObjectDoesNotExist:
+ container = Container.objects.create(collection=collection,
+ tag=tag,
+ name=collection.name)
+
+ # If the container is frozen, no go
+ if container.frozen:
+ bot.debug('%s is frozen, will not trigger build.' % container)
+ continue
+
+ # Recipe path on Github
+ reponame = container.collection.metadata['github']['repo_name']
+
+ # If we don't have a commit, just send to recipe
+ if metadata['commit'] is None:
+ deffile = "https://www.github.com/%s/tree/%s/%s" %(reponame,
+ branch,
+ recipe)
+ else:
+ deffile = "https://www.github.com/%s/blob/%s/%s" %(reponame,
+ metadata['commit'],
+ recipe)
+ # Webhook response
+ webhook = "%s%s" % (settings.DOMAIN_NAME,
+ reverse('receive_build', kwargs={"cid": container.id}))
+
+ # Generate a one time use secret for jwt web token
+ container.metadata['builder'] = {"name":"google_build",
+ "deffile": deffile}
+
+ payload = create_container_payload(container) # does not save
+
+ # Generate the jwt token
+ jwt_token = generate_jwt_token(secret=container.metadata['builder']['secret'],
+ payload=payload)
+
+ # If the repo is private, we need to account for that
+ token = None
+ if collection.metadata['github'].get('private', False) is True:
+ token = get_auth_token(collection.owners.first())
+
+ # Submit the build with the GitHub repo and commit
+ response = client.build_repo("github.com/%s" % metadata['name'],
+ recipe=recipe,
+ headless=True,
+ token=token,
+ commit=metadata['commit'],
+ webhook=webhook,
+ extra_data={"token": jwt_token})
+
+ # Add the metadata
+ container.metadata['build_metadata'] = response['metadata']
+ container.save()
+
+
+def get_build_context():
+ '''get shared build context between recipe build (push of a recipe) and
+ GitHub triggered build. This function takes no arguments
+ '''
+ # We checked that the setting is defined, here ensure exists
+ if not os.path.exists(settings.GOOGLE_APPLICATION_CREDENTIALS):
+ bot.exit('%s does not exist.' % settings.GOOGLE_APPLICATION_CREDENTIALS)
+
+ # Provide all envars directly to client instead of environment
+ context = {'GOOGLE_APPLICATION_CREDENTIALS': settings.GOOGLE_APPLICATION_CREDENTIALS,
+ 'SREGISTRY_GOOGLE_PROJECT': settings.SREGISTRY_GOOGLE_PROJECT}
+
+ # Put the credentials in the environment to find
+ os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = settings.GOOGLE_APPLICATION_CREDENTIALS
+
+ # The following are optional
+ for attr in ['SREGISTRY_GOOGLE_BUILD_CACHE',
+ 'SREGISTRY_GOOGLE_BUILD_SINGULARITY_VERSION',
+ 'SREGISTRY_GOOGLE_STORAGE_PRIVATE',
+ 'SREGISTRY_GOOGLE_STORAGE_BUCKET']:
+ if hasattr(settings, attr):
+ context[attr] = getattr(settings, attr)
+ return context
+
+
+def delete_build(cid, client=None):
+ '''Delete artifacts for a container build, if they exist, along
+ with the container object. This is called
+ as a django-rq task for a worker to do from views.py
+
+ Parameters
+ ==========
+ cid: the container id to finish the build for, expected to have an id
+ '''
+ from shub.apps.main.views import get_container
+
+ container = get_container(cid)
+
+ # if being called from delete_container_collection, just instantiate once
+ if client is None:
+ context = get_build_context()
+ client = get_client(debug=True, **context)
+
+ # If the container has an image, delete it
+ image = container.get_image() or ""
+ if container.metadata['builder']['name'] == "google_build":
+
+ # Delete the image
+ if "storage.googleapis.com" in image:
+ print("deleting container %s" % image)
+ container_name = os.path.basename(image)
+ client.delete(container_name, force=True)
+
+ # Finally, delete the container
+ container.delete()
+
+
+def delete_container_collection(cid, uid):
+ '''Delete artifacts for a container build, if they exist, and then
+ the entire collection. This is called
+ as a django-rq task for a worker to do from views.py
+
+ Parameters
+ ==========
+ cid: the collection id to delete.
+ uid: the user id requesting permission
+ '''
+ from shub.apps.main.views import get_collection
+ from .github import delete_webhook
+ collection = get_collection(cid)
+
+ # Delete files before containers
+ containers = Container.objects.filter(collection=collection)
+
+ # Create a client to share
+ context = get_build_context()
+ client = get_client(debug=True, **context)
+
+ # Delete container build objects first
+ for container in containers:
+ delete_build(cid=container.id, client=client)
+
+ # Now handle the webhook (a separate task)
+ if "github" in collection.metadata:
+ django_rq.enqueue(delete_webhook,
+ user=uid,
+ repo=collection.metadata['github']['repo_name'],
+ hook_id=collection.metadata['github']['webhook']['id'])
+
+ # Finally, delete the collection
+ print("%s deleting." % collection)
+ collection.delete()
+
+
+def is_over_limit(limit=None):
+ '''check if we are over the limit for active builds. Returns a boolean to
+ indicate yes or no, based on filtering the number of total builds
+ by those with status "QUEUED" or "WORKING."
+
+ Parameters
+ ==========
+ limit: an integer limit for the maximum concurrent waiting or active
+ builds. If not set, we use the default in settings.
+ '''
+ # Allow the function to set a custom limit
+ limit = limit or settings.SREGISTRY_GOOGLE_BUILD_LIMIT
+
+ # Instantiate client with context (connects to buckets)
+ context = get_build_context()
+ client = get_client(debug=True, **context)
+
+ project = settings.SREGISTRY_GOOGLE_PROJECT
+ result = client._build_service.projects().builds().list(projectId=project,
+ filter='status="QUEUED" OR status="WORKING"').execute()
+    return len(result.get('builds', [])) > limit
+
+
+def complete_build(cid, params, check_again_seconds=10):
+ '''finish a build, meaning obtaining the original build_id for the container
+ and checking for completion.
+
+ Parameters
+ ==========
+ cid: the container id to finish the build for, expected to have an id
+    params: the parameters from the build. They must have matching build id.
+ check_again_seconds: if the build is still working, check again in this
+ many seconds. By default, we multiply by 2 each time
+ (exponential backoff).
+ '''
+ from shub.apps.main.views import get_container
+
+ print("RUNNING COMPLETE BUILD")
+ container = get_container(cid)
+
+ # Case 1: No id provided
+ if "id" not in params:
+ return JsonResponseMessage(message="Invalid request.")
+
+ # Case 2: the container is already finished or not a google build
+ if "build_metadata" not in container.metadata or "builder" not in container.metadata:
+ return JsonResponseMessage(message="Invalid request.")
+
+ # Case 3: It's not a Google Build
+ if container.metadata['builder'].get('name') != "google_build":
+ return JsonResponseMessage(message="Invalid request.")
+
+ # Google build will have an id here
+ build_id = container.metadata['build_metadata']['build']['id']
+ status = container.metadata['build_metadata']['build']['status']
+
+ # Case 4: Build is already finished
+ active = ["QUEUED", "WORKING"]
+ if status not in active:
+ return JsonResponseMessage(message="Invalid request.")
+
+ # Case 5: Build id doesn't match
+ if build_id != params['id']:
+ return JsonResponseMessage(message="Invalid request.")
+
+ context = get_build_context()
+
+ # Instantiate client with context (connects to buckets)
+ client = get_client(debug=True, **context)
+
+ # Get an updated status
+ response = client._finish_build(build_id)
+
+ print("RESPONSE")
+ print(response)
+
+ if "public_url" in response:
+ container.metadata['image'] = response['public_url']
+
+ elif "media_link" in response:
+ container.metadata['image'] = response['media_link']
+
+ elif "status" in response:
+
+ # If it's still working, schedule to check with exponential backoff
+ if response["status"] in ["QUEUED", "WORKING"]:
+ check_again_seconds = check_again_seconds*2
+ print("Build status WORKING: checking in %s seconds" % check_again_seconds)
+
+ # Get the scheduler, submit to check again
+ scheduler = django_rq.get_scheduler('default')
+ scheduler.enqueue_in(timedelta(seconds=check_again_seconds),
+ complete_build,
+ cid=container.id,
+ params=params,
+                                 check_again_seconds=check_again_seconds)
+            return
+
+ # This is an invalid status, and no action to take
+ else:
+ print("Invalid response, no container link and status not working.")
+ return
+
+ # Save the build finish
+ container.metadata['build_finish'] = response
+
+ # Clear the container metadata
+ container = clear_container_payload(container)
+
+ # Add response metrics (size and file_hash)
+ if "size" in response:
+ container.metrics["size_mb"] = round(convert_size(response['size'], "MB"), 3)
+
+ # Update the status
+ if "status" in response:
+ container.metadata['build_metadata']['build']['status'] = response["status"]
+
+ # If a file hash is included, we use this as the version (not commit)
+ if "crc32" in response:
+ container.metrics["crc32"] = response["crc32"]
+
+ # Add the version, also calculated by builder
+ if "sha256sum" in response:
+ container.metrics["sha256"] = "sha256.%s" % response['sha256sum']
+ container.version = "sha256.%s" % response['sha256sum']
+
+ # Keep an md5, for posterity
+ if "md5sum" in response:
+ container.metrics["md5"] = "md5.%s" % response['md5sum']
+
+ # Calculate total time
+ if "startTime" in response and "finishTime" in response:
+ total_time = parse(response['finishTime']) - parse(response['startTime'])
+ container.metrics['build_seconds'] = total_time.total_seconds()
+
+ # Created date
+ if "createTime" in response:
+ created_at = datetime.strftime(parse(response['createTime']), '%h %d, %Y')
+ container.metrics['created_at'] = created_at
+
+ container.save()
diff --git a/shub/plugins/google_build/github.py b/shub/plugins/google_build/github.py
new file mode 100644
index 00000000..a16c67da
--- /dev/null
+++ b/shub/plugins/google_build/github.py
@@ -0,0 +1,465 @@
+'''
+
+Copyright (C) 2016-2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django.views.decorators.csrf import csrf_exempt
+from django.urls import reverse
+
+import django_rq
+
+from shub.apps.users.models import User
+from shub.logger import bot
+from shub.settings import (
+ DOMAIN_NAME,
+ SREGISTRY_GOOGLE_BUILD_LIMIT
+)
+from shub.apps.main.models import Collection
+
+from .utils import (
+ check_headers,
+ get_default_headers,
+ JsonResponseMessage,
+ load_body,
+ paginate,
+ validate_payload,
+ DELETE,
+ POST
+)
+
+from dateutil.parser import parse
+from datetime import datetime
+import re
+import requests
+
+
+api_base = 'https://api.github.com'
+
+
+## Calls
+
+def get_auth(user, headers=None, idx=0):
+ '''get_auth will return the authentication header for a user
+ the default headers (without auth) are returned if provider not github
+
+ Parameters
+ ==========
+ user: a user object
+ '''
+ if headers is None:
+ headers = get_default_headers()
+
+ # Tasks might provide a user id instead
+ if not isinstance(user, User):
+ try:
+ user = User.objects.get(id=user)
+ except User.DoesNotExist:
+ pass
+
+ token = get_auth_token(user, idx)
+
+ if token is not None:
+ token = "token %s" %(token)
+ headers["Authorization"] = token
+ return headers
+
+
+def get_auth_token(user, idx=0):
+ '''get_auth_token will return the auth token for a user.
+
+ Parameters
+ ==========
+ user: a user object
+ '''
+ # 1. Github private first priority
+ auth = [x for x in user.social_auth.all() if x.provider == 'github-private']
+
+ # 2. Github public second priority
+ if not auth:
+ auth = [x for x in user.social_auth.all() if x.provider == 'github']
+
+ if len(auth) > idx:
+ return auth[idx].access_token
+ else:
+        return auth[0].access_token if auth else None
+
+
+def get_repo(user, reponame, username, headers=None):
+ '''get_repo will return a single repo, username/reponame
+ given authentication with user
+
+ Parameters
+ ==========
+ user: the user to get github credentials for
+ reponame: the name of the repo to retrieve
+ username: the username of the repo (owner)
+ '''
+ # Case 1, the user just has one auth or just public
+ if headers is None:
+ headers = get_auth(user)
+ headers['Accept'] = "application/vnd.github.mercy-preview+json"
+ url = "%s/repos/%s/%s" %(api_base, username, reponame)
+ response = requests.get(url, headers=headers)
+
+ # Case 2: public and private
+ if response.status_code != 200:
+ auth_headers = get_auth(user, idx=1)
+ headers.update(auth_headers)
+ response = requests.get(url, headers=headers)
+ response = response.json()
+ return response
+
+
+def list_repos(user, headers=None):
+ '''list_repos will list the public repos for a user
+
+ Parameters
+ ==========
+ user: a user object to list
+ headers: headers to replace default
+ '''
+ if headers is None:
+ headers = get_auth(user)
+ url = "%s/user/repos" %(api_base)
+ repos = paginate(url=url, headers=headers)
+
+ if not repos:
+ auth_headers = get_auth(user, idx=1)
+ headers.update(auth_headers)
+ repos = paginate(url=url, headers=headers)
+ return repos
+
+
+def get_commits(user, uri, headers=None, sha=None, limit=None):
+    '''get_commits will return commits for a repository (username/repo)
+
+ Parameters
+ ==========
+ user: the user that owns the repository
+ uri: the username/repo
+    sha: if provided, return only the single commit with this sha
+ '''
+ if not headers:
+ headers = get_auth(user)
+ headers['Accept'] = "application/vnd.github.cryptographer-preview"
+ url = "%s/repos/%s/commits" %(api_base, uri)
+
+ # Option 1: return a sha
+ if sha:
+ url = "%s/%s" %(url, sha)
+ response = requests.get(url, headers=headers)
+ if response.status_code != 200:
+ headers.update(get_auth(user, idx=1))
+ response = requests.get(url, headers=headers)
+ return [response.json()]
+
+ # Option 2, return paginated commits
+ commits = paginate(url=url, headers=headers, limit=limit)
+ if not commits:
+ auth_headers = get_auth(user, idx=1)
+ headers.update(auth_headers)
+ commits = paginate(url=url, headers=headers, limit=limit)
+ bot.debug('Found %s commits'% len(commits))
+ return commits
+
+
+def get_branch_commits(user, uri, branch):
+ '''get all commits for a particular branch
+ '''
+ headers = get_auth(user)
+ headers['Accept'] = "application/vnd.github.cryptographer-preview"
+ url = "%s/repos/%s/commits?sha=%s" %(api_base, uri, branch)
+ response = requests.get(url=url, headers=headers)
+ if response.status_code != 200:
+ auth_headers = get_auth(user, idx=1)
+ headers.update(auth_headers)
+ response = requests.get(url=url, headers=headers)
+ return response.json()
+
+
+def get_commit_date(commit):
+ '''return the commit author date, then the committer'''
+ return (commit['commit'].get('author').get('date') or
+ commit['commit'].get('committer').get('date'))
+
+
+def get_commits_since(commits, since):
+    '''from a list of commits, return those in a list that occurred since
+ a provided date (since)
+ '''
+ updates = []
+ seen = []
+
+ def isnew(changed, since):
+ if isinstance(since, int):
+ since = datetime.fromtimestamp(since).strftime('%Y-%m-%dT%H:%M:%SZ')
+ if parse(changed) >= parse(since):
+ return True
+ return False
+
+ for commit in commits:
+ commit_date = get_commit_date(commit)
+ if isnew(commit_date, since) and commit['sha'] not in seen:
+ updates.append(commit)
+ seen.append(commit['sha'])
+
+ return updates
+
+
+def get_commit_details(collection, since=None, headers=None, limit=None):
+ '''get changed files will find changed files since a particular date.
+ If since is not defined, we take the following logic:
+
+ 1. First compare the commits date against the repo pushed_date. If
+ commits are found, differences are determined between those ranges.
+ 2. If the webhook was created without a new commit/push, then fall back
+ to comparing commits to the webhook creation date
+ 3. If still no passing ranges, parse entire result for changed files,
+ and return the most recent of each.
+ '''
+ if since is None:
+ since = collection.metadata['github']['pushed_at']
+ commits = get_commits(user=collection.owners.first(), # user created repo
+ uri=collection.metadata['github']['repo_name'],
+ limit=limit)
+
+ # If the collection has no containers, assess all commits
+ if not collection.containers.count():
+ updates = commits
+ else:
+ # First pass, commit date vs. repo[pushed_at]
+ updates = get_commits_since(commits, since)
+
+ # Second pass, commit date vs. webhook[created_at]
+ if not updates:
+ since = collection.metadata['github']['webhook']['created_at']
+ updates = get_commits_since(commits, since)
+
+ # Last try, since repo created
+ if not updates:
+ since = collection.metadata['github']['created_at']
+ updates = get_commits_since(commits, since)
+
+ commits = []
+
+ # Retrieve commits with complete list of changes
+ for commit in updates:
+ response = get_commits(user=collection.owners.first(),
+ uri=collection.metadata['github']['repo_name'],
+ sha=commit['sha'])
+ if response:
+ commits = commits + response
+ return commits
+
+
+## Create
+
+def create_webhook(user, repo, secret):
+ '''create_webhook will create a webhook for a repo to send back
+ to singularity hub on push
+
+ Parameters
+ ==========
+ user: user: should be a singularity hub user. This is used to get
+ the Github authentication
+ repo: should be a complete repo object, including username and reponame
+ secret: should be a randomly generated string, created when repo connected,
+ to validate future pushes
+ '''
+ headers = get_auth(user)
+
+ url = "%s/repos/%s/hooks" %(api_base, repo['full_name'])
+
+ callback_url = "%s%s/" %(DOMAIN_NAME.strip('/'), reverse('receive_hook'))
+
+ config = {"url": callback_url,
+ "content_type": "json",
+ "secret": secret}
+
+ params = {"name": "web",
+ "active": True,
+ "events": ["push", "deployment"],
+ "config": config}
+
+ # Create webhook
+ response = POST(url, headers=headers, data=params)
+    if response.status_code != 201:
+ headers.update(get_auth(user, idx=1))
+ response = POST(url, headers=headers, data=params)
+
+ response = response.json()
+
+ # Get topics
+ full_repo = get_repo(user=user,
+ headers=headers,
+ reponame=repo['name'],
+ username=repo['owner']['login'])
+
+ response['topics'] = full_repo['topics']
+ return response
+
+
+def update_webhook_metadata(repo):
+ '''based on a repository field from a webhook payload, return an
+ updated data structure with fields that we care about.
+
+ Parameters
+ ==========
+ repo: the repository object to get fields from
+ '''
+ return {'repo': repo['clone_url'],
+ 'private': repo['private'],
+ 'description': repo['description'],
+ 'created_at': repo['created_at'],
+ 'updated_at': repo['updated_at'],
+ 'pushed_at': repo['pushed_at'],
+ 'repo_id': repo['id'],
+ 'repo_name': repo['full_name']}
+
+## Delete
+
+def delete_webhook(user, repo, hook_id):
+ '''delete_webhook will delete a webhook, done when a user deletes a collection.
+ https://developer.github.com/v3/repos/hooks/#delete-a-hook
+ DELETE /repos/:owner/:repo/hooks/:hook_id
+
+ Parameters
+ ==========
+ user: should be a singularity hub user. This is used to get
+ the Github authentication
+ repo: should be a complete repo object, including username and reponame
+ '''
+ headers = get_auth(user)
+
+ url = "%s/repos/%s/hooks/%s" %(api_base, repo, hook_id)
+
+    response = DELETE(url, headers)
+    if response.status_code not in [200, 204]:
+        headers.update(get_auth(user, idx=1))
+        response = DELETE(url, headers)
+
+    # GitHub returns 204 (empty body) on success, which has no json to parse
+    return response.json() if response.content else {}
+
+
+
+
+################################################################################
+# WEBHOOK
+################################################################################
+
+
+@csrf_exempt
+def receive_github_hook(request):
+ '''receive_hook will receive a Github webhook, generate a new Container
+ for the collection, and then send the image to the build queue. For now,
+ we accept just collection builds
+
+ :: notes
+ - The container collection (repo) is looked up via the repo's Github name
+ - The user must be the owner of the container collection, associated with
+ the Github account
+ '''
+ # We do these checks again for sanity
+ if request.method == "POST":
+
+ # Has to have Github-Hookshot
+ if not re.search('GitHub-Hookshot', request.META["HTTP_USER_AGENT"]):
+ return JsonResponseMessage(message="Agent not allowed")
+
+ # Only allow application/json content type
+ if request.META["CONTENT_TYPE"] != "application/json":
+ return JsonResponseMessage(message="Incorrect content type")
+
+ # Check that it's coming from the right place
+ required_headers = ['HTTP_X_GITHUB_DELIVERY', 'HTTP_X_GITHUB_EVENT']
+ if not check_headers(request, required_headers):
+ return JsonResponseMessage(message="Agent not allowed")
+
+ # Has to be a ping or push
+ if request.META["HTTP_X_GITHUB_EVENT"] not in ["ping", "push", "deployment"]:
+ return JsonResponseMessage(message="Incorrect delivery method.")
+
+ # Parse the body
+ payload = load_body(request)
+ repo = payload.get('repository')
+
+ print(repo['full_name'])
+ try:
+ collection = Collection.objects.get(name=repo['full_name'])
+ except Collection.DoesNotExist:
+ return JsonResponseMessage(message="Collection not found",
+ status=404)
+
+ # Update repo metadata that might change
+ collection.metadata['github'].update(update_webhook_metadata(repo))
+ collection.save()
+
+ # We only currently parse user collections for Github
+ do_build = False
+
+ # We build on deployment
+ if "deployment" in payload:
+ if payload['deployment']['task'] == "deploy":
+ do_build = True
+
+ else:
+ do_build = True
+
+ print("IN RECEIVE HOOK, DO BUILD IS %s" % do_build)
+ if do_build:
+ return verify_payload(request, collection)
+
+        return JsonResponseMessage(message="Received, building disabled.", status=200)
+ return JsonResponseMessage(message="Invalid request.")
+
+
+def verify_payload(request, collection):
+ '''verify payload will verify a payload'''
+
+ from shub.plugins.google_build.tasks import parse_hook
+ from shub.plugins.google_build.actions import is_over_limit
+
+ payload = load_body(request)
+
+ # Validate the payload with the collection secret
+ signature = request.META.get('HTTP_X_HUB_SIGNATURE')
+ if not signature:
+ return JsonResponseMessage(message="Missing credentials.")
+
+ status = validate_payload(collection=collection,
+ payload=request.body,
+ request_signature=signature)
+ if not status:
+ return JsonResponseMessage(message="Invalid credentials.")
+
+ # If a branch is provided, this is the version "ref": "refs/heads/master",
+ try:
+ branch = payload.get('ref', 'refs/heads/master').replace('refs/heads/', '')
+ except:
+ branch = "master"
+
+ # Some newer webhooks have commits
+ commits = payload.get('commits')
+
+ # Ensure we aren't over limit
+ if is_over_limit():
+ message = ("Registry concurrent build limit is " +
+ "%s" % SREGISTRY_GOOGLE_BUILD_LIMIT + ". Please try again later.")
+
+ return JsonResponseMessage(message=message,
+ status_message="Permission Denied")
+
+ res = django_rq.enqueue(parse_hook, cid=collection.id,
+ branch=branch,
+ commits=commits)
+
+ print(res)
+ return JsonResponseMessage(message="Hook received and parsing.",
+ status=200,
+ status_message="Received")
diff --git a/shub/plugins/google_build/models.py b/shub/plugins/google_build/models.py
new file mode 100644
index 00000000..68c83749
--- /dev/null
+++ b/shub/plugins/google_build/models.py
@@ -0,0 +1,74 @@
+'''
+
+Copyright (C) 2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django.db.models.signals import post_save
+from django.conf import settings
+from django.db import models
+from shub.apps.api.models.storage import OverwriteStorage
+from .actions import trigger_build
+import uuid
+import os
+
+
+def get_upload_folder(instance, filename):
+ '''a helper function to upload a recipe file to storage.
+ '''
+ from shub.apps.main.models import Collection
+ collection_name = instance.collection.lower()
+ instance.collection = collection_name
+
+ # First get a collection
+ try:
+ collection = Collection.objects.get(name=collection_name)
+ except Collection.DoesNotExist:
+ collection = Collection.objects.create(name=collection_name)
+ collection.secret = str(uuid.uuid4())
+ collection.save()
+
+ # Create collection root, if it doesn't exist
+ image_home = os.path.join(settings.MEDIA_ROOT, collection_name)
+ recipe_home = os.path.join(image_home, 'recipes')
+
+ for dirname in [image_home, recipe_home]:
+ if not os.path.exists(dirname):
+ os.mkdir(dirname)
+
+ return os.path.join(recipe_home, filename)
+
+
+class RecipeFile(models.Model):
+ '''a RecipeFile is a Singularity Recipe pushed to do a remote build.
+ '''
+ created = models.DateTimeField(auto_now_add=True)
+ collection = models.CharField(max_length=200, null=False)
+ tag = models.CharField(max_length=200, null=False)
+ name = models.CharField(max_length=200, null=False)
+ owner_id = models.CharField(max_length=200, null=True)
+ datafile = models.FileField(upload_to=get_upload_folder,
+ max_length=255,
+ storage=OverwriteStorage())
+
+ def get_label(self):
+ return "recipefile"
+
+ def __str__(self):
+ if hasattr(self.datafile, 'name'):
+ return self.datafile.name
+ return self.get_label()
+
+ def get_abspath(self):
+ return os.path.join(settings.MEDIA_ROOT, self.datafile.name)
+
+ class Meta:
+ app_label = 'api'
+
+
+# Trigger a build when a recipe is uploaded
+post_save.connect(trigger_build, sender=RecipeFile)
diff --git a/shub/plugins/google_build/tasks.py b/shub/plugins/google_build/tasks.py
new file mode 100644
index 00000000..eddc8ff2
--- /dev/null
+++ b/shub/plugins/google_build/tasks.py
@@ -0,0 +1,213 @@
+'''
+
+Copyright (C) 2016-2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django.conf import settings
+from dateutil.parser import parse
+import django_rq
+import os
+import re
+
+
+
+def prepare_build_task(cid, recipes, branch):
+ '''wrapper to prepare build, to run as a task
+
+ Parameters
+ ==========
+ cid: the collection id to retrieve the collection
+ recipes: a dictionary of modified recipe files to build
+ branch: the repository branch (kept as metadata)
+ '''
+ print('RUNNING PREPARE BUILD TASK WITH RECIPES %s' % recipes)
+ from .actions import receive_build
+ from shub.apps.main.views import get_collection
+ collection = get_collection(cid)
+ receive_build(collection=collection,
+ recipes=recipes,
+ branch=branch)
+
+
+
+
+def parse_hook(cid,
+ branch="master",
+ commits=None):
+
+ '''parse hook will take a request and an associated user collection,
+ and finish parsing. This means generating the new container,
+ and submitting a job to run on Google Cloud Build.
+ '''
+ from shub.apps.main.views import get_collection
+
+ print("RUNNING PARSE HOOK")
+ collection = get_collection(cid)
+
+ # Determine changed Singularity file(s)
+ if commits is None:
+ return build_previous_commits(collection, branch)
+ return build_commits(collection, commits, branch)
+
+
+def build_commits(collection, commits, branch):
+ '''build commits that come directly from a ping. For this version, we get
+ a data structure with a list of added, removed, and modified files.
+
+ Parameters
+ ==========
+ collection: The collection to get details for.
+ commits: the commits to build.
+ '''
+ modified = dict()
+ removed = []
+
+ # Find changed files!
+ for commit in commits:
+
+ print(commit)
+ commit_id = commit.get('id')
+ commit_date = commit.get('timestamp')
+ files = commit['modified'] + commit['added']
+
+ # Add Removed files to compare with after
+ for filename in commit['removed']:
+ removed.append({"name": filename,
+ "id": commit_id,
+ "date": commit_date})
+
+ for filename in files:
+
+ # Supports building from Singularity recipes
+ if re.search("Singularity", filename):
+ add_record = True
+
+ if filename in modified:
+
+ # Don't add if we have more recent
+ if parse(commit_date) < parse(modified[filename]['date']):
+ add_record = False
+
+ # Do we add or remove?
+ if add_record:
+ modified[filename] = {
+ 'url': commit['url'],
+ 'commit': commit_id,
+ 'date': commit_date,
+ 'name': collection.metadata['github']['repo_name']}
+
+
+ print("MODIFIED RECIPES BEFORE RENAME %s" % modified)
+
+ # If the previous filename date is later than the record
+ for entry in removed:
+
+ # If the entry was modified before it was removed, remove it
+ if entry['name'] in modified:
+ if parse(modified[entry['name']]['date']) < parse(entry['date']):
+ del modified[entry['name']]
+
+ print("MODIFIED RECIPES AFTER RENAME %s" % modified)
+
+ # If we have records after parsing
+ if modified:
+
+ # This function submits the google build
+ django_rq.enqueue(prepare_build_task, cid=collection.id,
+ recipes=modified,
+ branch=branch)
+
+
+def build_previous_commits(collection, branch):
+ '''the result we get when we get commit details (from the API)
+ versus an actual commit object is different. This function parses
+ the results from "get_commit_details"
+
+ Parameters
+ ==========
+ collection: The collection to get details for.
+ '''
+ from .github import get_commit_details
+ commits = get_commit_details(collection, limit=25)
+
+ modified = []
+ renamed = []
+
+ # Find changed files!
+ for commit in commits:
+
+ commit_id = commit.get('sha') or commit.get('id')
+ commit_date = commit['commit']['committer']['date']
+
+ for record in commit['files']:
+
+ # We care about absolute basename paths
+ filename = record['filename']
+
+ # The file could have been removed
+ if record['status'] == 'removed':
+ continue
+
+ # Only going to build updated recipes
+ elif record['status'] in ['added', 'modified', 'renamed']:
+
+ # Supports building from Singularity recipes
+ if re.search("Singularity", filename):
+
+ # If the record is renamed after in modified, don't add
+ if record['status'] == 'renamed':
+ renamed.append({"to": record['filename'],
+ "from": record['previous_filename'],
+ "date": commit_date})
+
+ # The same file
+ elif record['status'] in ['added', 'modified']:
+ modified.append({'url': commit['url'],
+ 'commit': commit_id,
+ 'recipe': record['filename'],
+ 'date': commit_date,
+ 'name': collection.metadata['github']['repo_name']})
+
+ # Now assemble keepers
+ keepers = {}
+ for entry in modified:
+
+ # The recipe is what we compare to
+ recipe = os.path.basename(entry['recipe'])
+ if recipe in keepers:
+
+ # Update if it's more recent
+ if parse(keepers[recipe]['date']) < parse(entry['date']):
+ keepers[recipe] = entry
+
+ else:
+ keepers[recipe] = entry
+
+
+ # Change the recipe index back to the correct filename
+ modified = {}
+ for recipe, metadata in keepers.items():
+ modified[metadata['recipe']] = metadata
+
+ # If the previous filename date is later than the record
+ for entry in renamed:
+
+ # If the entry was modified before it was renamed, remove it
+ if entry['from'] in modified:
+ if parse(modified[entry['from']]['date']) < parse(entry['date']):
+ del modified[entry['from']]
+
+ print("MODIFIED RECIPES AFTER RENAME %s" % modified)
+
+ # If we have records after parsing
+ if modified:
+
+ # This function submits the google build
+ django_rq.enqueue(prepare_build_task, cid=collection.id,
+ recipes=modified,
+ branch=branch)
diff --git a/shub/plugins/google_build/templates/google_build/add_collection.html b/shub/plugins/google_build/templates/google_build/add_collection.html
new file mode 100644
index 00000000..b5b467fc
--- /dev/null
+++ b/shub/plugins/google_build/templates/google_build/add_collection.html
@@ -0,0 +1,119 @@
+{% extends "base/base.html" %}
+{% load staticfiles %}
+
+{% block css %}
+
+{% endblock %}
+
+{% block content %}
+{% include 'messages/message.html' %}
+
+
+
+
+
+
+
+
+{% endblock %}
+
+{% block scripts %}
+
+
+{% endblock %}
diff --git a/shub/plugins/google_build/templates/google_build/status.html b/shub/plugins/google_build/templates/google_build/status.html
new file mode 100644
index 00000000..36decfee
--- /dev/null
+++ b/shub/plugins/google_build/templates/google_build/status.html
@@ -0,0 +1,9 @@
+{% if container.metadata.build_metadata.build.logUrl %}{% if edit_permission %}{% else %}{% endif %}{% endif %}
+{% if container.metadata.build_metadata.build.status == "SUCCESS" %}
+{% else %}{% endif %}
+{% if container.metadata.build_metadata.build.logUrl %}{% endif %}
diff --git a/shub/plugins/google_build/urls.py b/shub/plugins/google_build/urls.py
new file mode 100644
index 00000000..b8b2274f
--- /dev/null
+++ b/shub/plugins/google_build/urls.py
@@ -0,0 +1,28 @@
+'''
+
+Copyright (C) 2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django.conf.urls import include, url
+from shub.plugins.google_build import views
+from rest_framework import routers
+
+router = routers.DefaultRouter()
+router.register(r'^build', views.RecipePushViewSet, base_name="build") # build
+
+urlpatterns = [
+ url(r'^github/receive/?$', views.receive_hook, name="receive_hook"),
+ url(r'^build/receive/(?P<cid>\d+)/?$', views.receive_build, name="receive_build"),
+ url(r'^delete/container/(?P<cid>\d+)/?$', views.delete_container,
+ name="delete_google_container"),
+ url(r'^delete/collection/(?P<cid>\d+)/?$', views.delete_collection,
+ name="delete_google_collection"),
+ url(r'^github/save/?$', views.save_collection, name="save_collection"),
+ url(r'^github/connect/?$', views.connect_github, name="google_build_connect"),
+ url(r'^', include(router.urls))
+]
diff --git a/shub/plugins/google_build/utils.py b/shub/plugins/google_build/utils.py
new file mode 100644
index 00000000..6a678acc
--- /dev/null
+++ b/shub/plugins/google_build/utils.py
@@ -0,0 +1,285 @@
+'''
+
+Copyright (C) 2016-2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django.http import JsonResponse
+from django.conf import settings
+from sregistry.logger import RobotNamer
+from datetime import datetime, timedelta
+import hashlib
+import hmac
+import json
+import jwt
+import requests
+import uuid
+
+################################################################################
+# REQUESTS
+################################################################################
+
+
+def POST(url, headers, data=None, params=None):
+ '''post_url will use the requests library to post to a url
+ '''
+ if data is not None:
+ return requests.post(url,
+ headers=headers,
+ data=json.dumps(data))
+ return requests.post(url, headers=headers)
+
+
+def DELETE(url, headers, data=None, params=None):
+ '''issue a delete reqest, with or without data and params.
+ '''
+ if data is not None:
+ return requests.delete(url,
+ headers=headers,
+ data=json.dumps(data))
+ return requests.delete(url, headers=headers)
+
+
+def format_params(url, params):
+ '''format_params will add a list of params (?key=value) to a url
+
+ Parameters
+ ==========
+ params: a dictionary of params to add
+ url: the url to add params to
+ '''
+ # Always try to get 100 per page
+ params["per_page"] = 100
+ count = 0
+ for param, value in params.items():
+ if count == 0:
+ url = "%s?%s=%s" %(url, param, value)
+ else:
+ url = "%s&%s=%s" %(url, param, value)
+ count += 1
+ return url
+
+
+def paginate(url, headers, min_count=30, start_page=1, params=None, limit=None):
+ '''paginate will send posts to a url with post_url
+ until the results count is not exceeded
+
+ Parameters
+ ==========
+ min_count: the results count to go to
+ start_page: the starting page
+ '''
+ if params is None:
+ params = dict()
+ result = []
+ result_count = 1000
+ page = start_page
+ while result_count >= min_count:
+
+ # If the user set a limit, honor it
+ if limit is not None:
+ if len(result) >= limit:
+ return result
+
+ params['page'] = page
+ paginated_url = format_params(url, params)
+ new_result = requests.get(paginated_url, headers=headers).json()
+ result_count = len(new_result)
+
+ # If the user triggers bad credentials, empty repository, stop
+ if isinstance(new_result, dict):
+ return result
+
+ result = result + new_result
+ page += 1
+ return result
+
+
+def validate_payload(collection, payload, request_signature):
+ '''validate_payload will retrieve a collection secret, use it
+ to create a hexdigest of the payload (request.body) and ensure
+ that it matches the signature in the header). This is what we use
+ for GitHub webhooks. The secret used is NOT the collection secret,
+ but a different one for GitHub.
+
+ Parameters
+ ==========
+ collection: the collection object with the secret
+ payload: the request body sent by the service
+ request_signature: the signature to compare against
+ '''
+ secret = collection.metadata['github']['secret'].encode('utf-8') # converts to bytes
+ digest = hmac.new(secret,
+ digestmod=hashlib.sha1,
+ msg=payload).hexdigest()
+ signature = 'sha1=%s' %(digest)
+ return hmac.compare_digest(signature, request_signature)
+
+
+################################################################################
+# JWT
+################################################################################
+
+def get_container_payload(container):
+ '''a helper function to return a consistent container payload.
+
+ Parameters
+ ==========
+ container: a container object to get a payload for
+ '''
+ return {
+ "collection": container.collection.id,
+ "container": container.id,
+ "robot-name": container.metadata['builder']['robot_name'],
+ "tag": container.tag
+ }
+
+
+def create_container_payload(container):
+ '''a helper function to create a consistent container payload.
+
+ Parameters
+ ==========
+ container: a container object to create a payload for
+ '''
+ if "builder" not in container.metadata:
+ container.metadata['builder'] = {}
+
+ if "robot_name" not in container.metadata["builder"]:
+ container.metadata['builder']['robot_name'] = RobotNamer().generate()
+
+ # Always create a new secret
+ container.metadata['builder']['secret'] = str(uuid.uuid4())
+ container.save()
+ return get_container_payload(container)
+
+
+def clear_container_payload(container):
+ '''after we receive the build response, we clear the payload metadata
+ so it cannot be used again. This function does not save, but returns
+ the container for the calling function to do so.
+
+ Parameters
+ ==========
+ container: a container object to clear payload secrets for
+ '''
+ if "builder" in container.metadata:
+ if "robot_name" in container.metadata['builder']:
+ del container.metadata['builder']['robot_name']
+
+ if "secret" in container.metadata['builder']:
+ del container.metadata['builder']['secret']
+
+ return container
+
+
+def validate_jwt(container, params):
+ '''Given a container (with a build secret and other metadata) validate
+ a token (if it exists). If valid, return true. Otherwise,
+ return False.
+ '''
+ if "token" not in params:
+ return False
+
+ # The secret is removed after one response
+ if "secret" not in container.metadata['builder']:
+ return False
+
+ secret = container.metadata['builder']['secret']
+
+ # Validate the payload
+ try:
+ payload = jwt.decode(params['token'], secret, algorithms=["HS256"])
+ except (jwt.DecodeError, jwt.ExpiredSignatureError):
+ return False
+
+ # Compare against what we know
+ valid_payload = get_container_payload(container)
+
+ # Every field must be equal
+ for key, _ in valid_payload.items():
+ if key not in payload:
+ return False
+ if payload[key] != valid_payload[key]:
+ return False
+
+ return True
+
+
+def generate_jwt_token(secret, payload, algorithm="HS256"):
+ '''given a secret, an expiration in seconds, and an algorithm, generate
+ a jwt token to add as a header to the build response.
+
+ Parameters
+ ==========
+ secret: the container builder secret, only used once
+ payload: the payload to encode
+ algorithm: the algorithm to use.
+ '''
+ # Add an expiration of 8 hours to the payload
+ expires_in = settings.SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS
+ payload['exp'] = datetime.utcnow() + timedelta(seconds=expires_in)
+ return jwt.encode(payload, secret, algorithm).decode('utf-8')
+
+
+################################################################################
+# HEADERS/NAMING
+################################################################################
+
+
+def check_headers(request, headers):
+ '''check_headers will ensure that header keys are included in
+ a request. If one is missing, returns False
+
+ Parameters
+ ==========
+ request: the request object
+ headers: the headers (keys) to check for
+ '''
+ for header in headers:
+ if header not in request.META:
+ return False
+ return True
+
+
+def get_default_headers():
+ '''get_default_headers will return content-type json, etc.
+ '''
+ headers = {"Content-Type": "application/json"}
+ return headers
+
+
+def JsonResponseMessage(status=500, message=None, status_message='error'):
+ response = {'status': status_message}
+ if message is not None:
+ response['message'] = message
+ return JsonResponse(response, status=status)
+
+
+################################################################################
+# FORMATTING
+################################################################################
+
+
+def convert_size(size_bytes, to, bsize=1024):
+ '''A function to convert bytes to a human friendly string.
+ '''
+ a = {'KB': 1, 'MB': 2, 'GB': 3, 'TB': 4, 'PB': 5, 'EB': 6}
+ r = float(size_bytes)
+ for _ in range(a[to]):
+ r = r / bsize
+ return r
+
+
+def load_body(request):
+ '''load the body of a request.
+ '''
+ if isinstance(request.body, bytes):
+ payload = json.loads(request.body.decode('utf-8'))
+ else:
+ payload = json.loads(request.body)
+ return payload
diff --git a/shub/plugins/google_build/views.py b/shub/plugins/google_build/views.py
new file mode 100644
index 00000000..67521727
--- /dev/null
+++ b/shub/plugins/google_build/views.py
@@ -0,0 +1,355 @@
+'''
+
+Copyright (C) 2019 Vanessa Sochat.
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+
+from django.contrib.auth.decorators import login_required
+from django.views.decorators.csrf import csrf_exempt
+from django.conf import settings
+from django.contrib import messages
+from django.shortcuts import (
+ render,
+ redirect
+)
+
+from rest_framework.viewsets import ModelViewSet
+from rest_framework import serializers
+from rest_framework.exceptions import PermissionDenied
+from rest_framework.parsers import (
+ FormParser,
+ MultiPartParser
+)
+
+from shub.apps.main.views import (
+ get_container,
+ get_collection
+)
+
+from shub.apps.main.models import (
+ Collection,
+ Container
+)
+
+from shub.apps.api.utils import (
+ get_request_user,
+ validate_request,
+ has_permission
+)
+
+from sregistry.main.registry.auth import generate_timestamp
+from .github import (
+ create_webhook,
+ get_repo,
+ list_repos,
+ receive_github_hook,
+ update_webhook_metadata
+)
+
+from .models import RecipeFile
+import django_rq
+from datetime import timedelta
+
+from .actions import (
+ complete_build,
+ delete_build,
+ delete_container_collection,
+ is_over_limit
+)
+
+from .utils import (
+ JsonResponseMessage,
+ validate_jwt
+)
+import re
+import json
+import uuid
+
+@login_required
+def connect_github(request):
+ '''create a new container collection based on connecting GitHub.
+ '''
+
+ # All repos owned by the user on GitHub are contenders
+ contenders = list_repos(request.user)
+
+ # Filter down to repos that haven't had an equivalent URI added
+ # This is intentionally different from the uri that we push so that only
+ # builds can be supported from GitHub (and they don't cross contaminate)
+ collections = [x.name for x in Collection.objects.filter(owners=request.user)]
+
+ # Only requirement is that URI (name) isn't already taken, add to repos
+ repos = []
+ for repo in contenders:
+ if repo['full_name'] not in collections:
+ repos.append(repo)
+
+ context = {"repos": repos}
+ return render(request, "google_build/add_collection.html", context)
+
+
+@login_required
+def save_collection(request):
+ '''save the newly selected collection by the user.
+ '''
+
+ if request.method == "POST":
+
+ # The checked repos are sent in format REPO_{{ repo.owner.login }}/{{ repo.name }}
+ repos = [x.replace('REPO_', '') for x in request.POST.keys() if re.search("^REPO_", x)]
+ secret = str(uuid.uuid4())
+ webhook_secret = str(uuid.uuid4())
+
+ if repos:
+
+ # If the user doesn't have permission to create a collection
+ if not request.user.has_create_permission():
+ messages.error(request, "You do not have permission to create a collection.")
+ return redirect('collections')
+
+ # Always just take the first one
+ username, reponame = repos[0].split('/')
+
+ # Retrieve the repo fully
+ repo = get_repo(request.user,
+ reponame=reponame,
+ username=username)
+
+ # Collection needs to exist before webhook
+ collection = Collection.objects.create(secret=secret,
+ name=repo['full_name'])
+
+ collection.metadata['github'] = {'secret': webhook_secret}
+ collection.metadata['github'].update(update_webhook_metadata(repo))
+ collection.save()
+
+ webhook = create_webhook(user=request.user,
+ repo=repo,
+ secret=webhook_secret)
+
+ if "errors" in webhook:
+
+ # If there is an error, we should tell user about it
+ message = ','.join([x['message'] for x in webhook['errors']])
+ messages.info(request, "Errors: %s" % message)
+
+ # If the webhook was successful, it will have a ping_url
+ elif "ping_url" in webhook:
+
+ # Add minimal metadata about repo and webhook
+ collection.metadata['github']['webhook'] = webhook
+
+ collection.owners.add(request.user)
+
+ # Add tags
+ if "topics" in webhook:
+ if webhook['topics']:
+ for topic in webhook['topics']:
+ collection.tags.add(topic)
+ collection.save()
+
+ collection.save() # probably not necessary
+ return redirect(collection.get_absolute_url())
+
+ return redirect('collections')
+
+
+class RecipePushSerializer(serializers.HyperlinkedModelSerializer):
+
+ created = serializers.DateTimeField(read_only=True)
+ collection = serializers.CharField(read_only=True)
+ tag = serializers.CharField(read_only=True)
+ name = serializers.CharField(read_only=True)
+ owner_id = serializers.CharField(read_only=True)
+ datafile = serializers.FileField(read_only=True)
+
+ class Meta:
+ model = RecipeFile
+ fields = ('created', 'datafile', 'collection', 'owner_id', 'tag', 'name',)
+
+
+class RecipePushViewSet(ModelViewSet):
+ '''pushing a recipe coincides with doing a remote build.
+ '''
+ queryset = RecipeFile.objects.all()
+ serializer_class = RecipePushSerializer
+ parser_classes = (MultiPartParser, FormParser,)
+
+ def perform_create(self, serializer):
+
+ print(self.request.data)
+ tag = self.request.data.get('tag', 'latest')
+ name = self.request.data.get('name')
+ auth = self.request.META.get('HTTP_AUTHORIZATION', None)
+ collection_name = self.request.data.get('collection')
+
+ # Authentication always required for push
+
+ if auth is None:
+ print("auth is None")
+ raise PermissionDenied(detail="Authentication Required")
+
+ owner = get_request_user(auth)
+ timestamp = generate_timestamp()
+ payload = "build|%s|%s|%s|%s|" %(collection_name,
+ timestamp,
+ name,
+ tag)
+
+
+ # Validate Payload
+ if not validate_request(auth, payload, "build", timestamp):
+ print("auth is not valid for build")
+ raise PermissionDenied(detail="Unauthorized")
+
+ # Does the user have create permission?
+ if not owner.has_create_permission():
+ print("owned doesnt have create permission")
+ raise PermissionDenied(detail="Unauthorized Create Permission")
+
+ # Are we over the build limit?
+ if is_over_limit():
+ message = ("Registry concurrent build limit is " +
+ "%s" % settings.SREGISTRY_GOOGLE_BUILD_LIMIT + ". Please try again later.")
+ print(message)
+ raise PermissionDenied(detail=message)
+
+ create_new = False
+
+ # Determine the collection to build the recipe to
+ try:
+ collection = Collection.objects.get(name=collection_name)
+
+ # Only owners can push to existing collections
+ if not owner in collection.owners.all():
+ print("user not in owners")
+ raise PermissionDenied(detail="Unauthorized")
+
+ except Collection.DoesNotExist:
+ print("collection does not exist")
+ raise PermissionDenied(detail="Not Found")
+
+ # Validate User Permissions
+ if not has_permission(auth, collection, pull_permission=False):
+ print("user does not have permissions.")
+ raise PermissionDenied(detail="Unauthorized")
+
+ # The collection must exist when we get here
+ try:
+ container = Container.objects.get(collection=collection,
+ name=name,
+ tag=tag)
+ if not container.frozen:
+ create_new = True
+
+ except Container.DoesNotExist:
+ create_new = True
+
+ # Create the recipe to trigger a build
+ print(self.request.data.get('datafile'))
+
+ if create_new is True:
+ serializer.save(datafile=self.request.data.get('datafile'),
+ collection=self.request.data.get('collection'),
+ tag=self.request.data.get('tag', 'latest'),
+ name=self.request.data.get('name'),
+ owner_id=owner.id)
+ else:
+ raise PermissionDenied(detail="%s is frozen, push not allowed." % container.get_short_uri())
+
+
+# Receive GitHub Hook
+
+@csrf_exempt
+def receive_build(request, cid):
+ '''receive_build will receive the post from Google Cloud Build.
+ we check the response header against the jwt token to authenticate,
+ and then check other metadata and permissions in complete_build.
+ '''
+ print(request.body)
+ print(cid)
+
+ if request.method == "POST":
+
+ # Must be an existing container
+ container = get_container(cid)
+ if container is None:
+ return JsonResponseMessage(message="Invalid request.")
+
+ # Decode parameters
+ params = json.loads(request.body.decode('utf-8'))
+
+ # Must include a jwt token that is valid for the container
+ if not validate_jwt(container, params):
+ return JsonResponseMessage(message="Invalid request.")
+
+ scheduler = django_rq.get_scheduler('default')
+ scheduler.enqueue_in(timedelta(seconds=10),
+ complete_build,
+ cid=container.id,
+ params=params)
+
+ return JsonResponseMessage(message="Notification Received",
+ status=200,
+ status_message="Received")
+
+ return JsonResponseMessage(message="Invalid request.")
+
+
+@login_required
+def delete_container(request, cid):
+ '''delete a container, including its corresponding files
+ that are stored in Google Build (if they exist)
+ '''
+ container = get_container(cid)
+
+ if not container.has_edit_permission(request):
+ messages.info(request, "This action is not permitted.")
+ return redirect('collections')
+
+ # Send a job to the worker to delete the build files
+ django_rq.enqueue(delete_build, cid=container.id)
+ messages.info(request, 'Container successfully deleted.')
+ return redirect(container.collection.get_absolute_url())
+
+
+@login_required
+def delete_collection(request, cid):
+ '''delete a container collection that has Google Builds
+
+ Parameters
+ ==========
+ cid: the collection id to delete
+ '''
+ collection = get_collection(cid)
+
+ # Only an owner can delete
+ if not collection.has_edit_permission(request):
+ messages.info(request, "This action is not permitted.")
+ return redirect('collections')
+
+ # Queue the job to delete the collection
+ django_rq.enqueue(delete_container_collection,
+ cid=collection.id,
+ uid=request.user.id)
+
+ messages.info(request, 'Collection requested for deletion.')
+ return redirect('collections')
+
+
+@csrf_exempt
+def receive_hook(request):
+ '''receive_hook will forward a hook to the correct receiver depending on
+ the header information. If it cannot be determined, it is ignored.
+ '''
+ if request.method == "POST":
+
+ # Has to have Github-Hookshot
+ if re.search('GitHub-Hookshot', request.META["HTTP_USER_AGENT"]) is not None:
+ return receive_github_hook(request)
+
+ return JsonResponseMessage(message="Invalid request.")
diff --git a/shub/plugins/ldap_auth/__init__.py b/shub/plugins/ldap_auth/__init__.py
index 10dc890a..6e480b51 100644
--- a/shub/plugins/ldap_auth/__init__.py
+++ b/shub/plugins/ldap_auth/__init__.py
@@ -1,8 +1,8 @@
-AUTHENTICATION_BACKENDS = ('django_auth_ldap.backend.LDAPBackend',)
-
# Show LDAP log messages
import logging
+AUTHENTICATION_BACKENDS = ('django_auth_ldap.backend.LDAPBackend',)
+
logger = logging.getLogger('django_auth_ldap')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
diff --git a/shub/plugins/pam_auth/urls.py b/shub/plugins/pam_auth/urls.py
index b77862c8..d09744b9 100644
--- a/shub/plugins/pam_auth/urls.py
+++ b/shub/plugins/pam_auth/urls.py
@@ -10,4 +10,3 @@
auth_views.LoginView.as_view(template_name='pam_auth/logout.html'),
name="pam_auth-logout"),
]
-
diff --git a/shub/plugins/saml_auth/context_processors.py b/shub/plugins/saml_auth/context_processors.py
index 33e85a44..c3ee8ba4 100644
--- a/shub/plugins/saml_auth/context_processors.py
+++ b/shub/plugins/saml_auth/context_processors.py
@@ -15,4 +15,4 @@
def saml_processor(request):
return {'AUTH_SAML_IDP': AUTH_SAML_IDP,
- 'AUTH_SAML_INSTITUTION': AUTH_SAML_INSTITUTION }
+ 'AUTH_SAML_INSTITUTION': AUTH_SAML_INSTITUTION}
diff --git a/shub/settings/__init__.py b/shub/settings/__init__.py
index 58aac63a..7445847b 100644
--- a/shub/settings/__init__.py
+++ b/shub/settings/__init__.py
@@ -1,4 +1,6 @@
from importlib import import_module
+import os
+import sys
from .applications import *
from .config import *
@@ -9,9 +11,21 @@
from .tasks import *
# If PAM_AUTH in plugins enbled, add django_pam
-if "pam_auth" in INSTALLED_APPS:
+if "pam_auth" in PLUGINS_ENABLED:
INSTALLED_APPS += ['django_pam']
+# If google_build in use, we are required to include GitHub
+if "google_build" in PLUGINS_ENABLED:
+
+ # For task discovery by celery
+ SOCIAL_AUTH_GITHUB_SCOPE = ["admin:repo_hook",
+ "repo:status",
+ "user:email",
+ "read:org",
+ "admin:org_hook",
+ "deployment_status"]
+ ENABLE_GITHUB_AUTH = True
+
# Apply any plugin settings
for plugin in PLUGINS_ENABLED:
diff --git a/shub/settings/applications.py b/shub/settings/applications.py
index b9444ae8..f17a896a 100644
--- a/shub/settings/applications.py
+++ b/shub/settings/applications.py
@@ -23,15 +23,16 @@
'shub.apps.logs',
'shub.apps.main',
'shub.apps.users',
- 'shub.apps.singularity'
+ 'shub.apps.singularity',
+ 'shub.apps.library',
]
THIRD_PARTY_APPS = [
'social_django',
'crispy_forms',
+ 'django_rq',
'django_gravatar',
'django_extensions',
- 'djcelery',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
@@ -39,5 +40,4 @@
]
-
INSTALLED_APPS += THIRD_PARTY_APPS
diff --git a/shub/settings/config.py b/shub/settings/config.py
index eeecd2c9..98bebc98 100644
--- a/shub/settings/config.py
+++ b/shub/settings/config.py
@@ -11,11 +11,11 @@
# AUTHENTICATION
# Which social auths do you want to use?
-ENABLE_GOOGLE_AUTH=False
-ENABLE_TWITTER_AUTH=False
-ENABLE_GITHUB_AUTH=True
-ENABLE_GITLAB_AUTH=False
-ENABLE_BITBUCKET_AUTH=False
+ENABLE_GOOGLE_AUTH = False
+ENABLE_TWITTER_AUTH = False
+ENABLE_GITHUB_AUTH = True
+ENABLE_GITLAB_AUTH = False
+ENABLE_BITBUCKET_AUTH = False
# NOTE you will need to set authentication methods up.
# Configuration goes into secrets.py
@@ -28,26 +28,30 @@
# DOMAIN NAMES
+## IMPORTANT: if/when you switch to https, you need to change "DOMAIN_NAME"
+# to have https, otherwise some functionality will not work (e.g., GitHub webhooks)
DOMAIN_NAME = "http://127.0.0.1"
DOMAIN_NAME_HTTP = "http://127.0.0.1"
-DOMAIN_NAKED = DOMAIN_NAME_HTTP.replace('http://','')
+DOMAIN_NAKED = DOMAIN_NAME_HTTP.replace('http://', '')
-ADMINS = (( 'vsochat', 'vsochat@gmail.com'),)
+ADMINS = (('vsochat', 'vsochat@gmail.com'),)
MANAGERS = ADMINS
HELP_CONTACT_EMAIL = 'vsochat@stanford.edu'
-HELP_INSTITUTION_SITE = 'srcc.stanford.edu'
+HELP_INSTITUTION_SITE = 'https://srcc.stanford.edu'
REGISTRY_NAME = "Tacosaurus Computing Center"
REGISTRY_URI = "taco"
-
# PERMISSIONS
# Allow users to create public collections
USER_COLLECTIONS = True
+# Limit users to N collections (None is unlimited)
+USER_COLLECTION_LIMIT = 2
+
# Should registries by default be private, with no option for public?
PRIVATE_ONLY = False
@@ -74,14 +78,13 @@
# After how many single containers should we switch to showing collections
# only? >= 1000
-VISUALIZATION_TREEMAP_COLLECTION_SWITCH=1000
-
+VISUALIZATION_TREEMAP_COLLECTION_SWITCH = 1000
# Logging
# Do you want to save complete response metadata per each pull?
# If you disable, we still keep track of collection pull counts, but not specific versions
-LOGGING_SAVE_RESPONSES=True
+LOGGING_SAVE_RESPONSES = True
# Plugins
# Add the name of a plugin under shub.plugins here to enable it
@@ -91,12 +94,14 @@
# Available Plugins:
# - ldap_auth: Allows sregistry to authenticate against an LDAP directory
+# - google_build: a custom storage with that uses Google Cloud Build + Storage
# - pam_auth: Allow users from (docker) host to log in
# - globus: allows connection from sregistry to endpoints
-# - saml: authentication with SAML
+# - saml_auth: authentication with SAML
PLUGINS_ENABLED = [
# 'ldap_auth',
+# 'google_build'
# 'pam_auth',
# 'globus',
# 'saml_auth'
diff --git a/shub/settings/dummy_secrets.py b/shub/settings/dummy_secrets.py
index b47bdd9f..520e47ab 100644
--- a/shub/settings/dummy_secrets.py
+++ b/shub/settings/dummy_secrets.py
@@ -55,6 +55,41 @@
#SOCIAL_AUTH_GITLAB_KEY = ''
#SOCIAL_AUTH_GITLAB_SECRET = ''
+# =============================================================================
+# Google Cloud Build + Storage
+# Configure a custom builder and storage endpoint
+# =============================================================================
+
+# GOOGLE_APPLICATION_CREDENTIALS="/path/to/credentials.json"
+# SREGISTRY_GOOGLE_PROJECT="myproject-ftw"
+
+# SREGISTRY_GOOGLE_BUILD_CACHE="true"
+# After build, do not delete intermediate dependencies in cloudbuild bucket (keep them as cache for rebuild if needed).
+# Defaults to being unset, meaning that files are cleaned up. If you define this as anything, the build files will be cached.
+
+# SREGISTRY_GOOGLE_BUILD_LIMIT=100
+# To prevent denial of service attacks on Google Cloud Storage, you should set a reasonable limit for the number of active, concurrent builds.
+# This number should be based on your expected number of users, repositories, and recipes per repository.
+
+# SREGISTRY_GOOGLE_BUILD_SINGULARITY_VERSION="v3.2.1-slim"
+# if you want to specify a version of Singularity. The version must coincide with a container tag hosted under singularityware/singularity. The version will default to 3.2.0-slim. If you want to use a different version, update this variable.
+
+# SREGISTRY_GOOGLE_STORAGE_BUCKET="taco-singularity-registry"
+# is the name for the bucket you want to create. The example here is using the unique identifier appended with "sregistry-"
+# If you don't define it, it will default to a string that includes the hostname.
+# Additionally, a temporary bucket is created with the same name ending in _cloudbuild. This bucket is for build time dependencies, and is cleaned up after the fact. If you are having trouble getting a bucket, it is likely because the name is taken,
+# and we recommend creating both the bucket and its _cloudbuild counterpart in the console and then setting the name here.
+
+# SREGISTRY_GOOGLE_STORAGE_PRIVATE=True
+# by default, images that you upload will be made public, meaning that a user that stumbles on the URL (or has permission to read your bucket otherwise) will be able to see and download them. If you want to make images globally private you should export this variable as some derivative of yes/true. If no variable is found, images are made public by default.
+
+# SREGISTRY_GOOGLE_BUILD_TIMEOUT_SECONDS=None
+# The number of seconds for the build to timeout. If set to None, will be 10 minutes. If
+# unset, will default to 3 hours. This time should be less than the SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS
+
+# SREGISTRY_GOOGLE_BUILD_EXPIRE_SECONDS=28800
+# The number of seconds for the build to expire, meaning its response is no longer accepted by the server. This must be defined.
+# The default 28800 indicates 8 hours (in seconds)
# -----------------------------------------------------------------------------
# Bitbucket OAuth2
diff --git a/shub/settings/main.py b/shub/settings/main.py
index 8cfe57aa..626e0597 100644
--- a/shub/settings/main.py
+++ b/shub/settings/main.py
@@ -13,10 +13,6 @@
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-# Set environment variable SINGULARITY_HUB to true, as indicator for singularity-python
-SINGULARITY_HUB = True
-os.environ['SINGULARITY_HUB'] = "%s" %SINGULARITY_HUB
-
# Custom user model
AUTH_USER_MODEL = 'users.User'
SOCIAL_AUTH_USER_MODEL = 'users.User'
diff --git a/shub/settings/tasks.py b/shub/settings/tasks.py
index 4a506d9b..27e41038 100644
--- a/shub/settings/tasks.py
+++ b/shub/settings/tasks.py
@@ -8,28 +8,20 @@
'''
-from kombu import Exchange, Queue
+from .config import PLUGINS_ENABLED
import os
-# CELERY SETTINGS
-REDIS_PORT = 6379
-REDIS_DB = 0
-REDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR', 'redis')
-
-# CELERY SETTINGS
-CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
-BROKER_URL = 'redis://redis:6379/0'
-CELERY_ACCEPT_CONTENT = ['json']
-CELERY_TASK_SERIALIZER = 'json'
-CELERY_RESULT_SERIALIZER = 'json'
-CELERY_DEFAULT_QUEUE = 'default'
-CELERY_QUEUES = (
- Queue('default', Exchange('default'), routing_key='default'),
-)
-CELERY_IMPORTS = ('shub.apps.api.tasks',)
-
-CELERY_RESULT_BACKEND = 'redis://%s:%d/%d' %(REDIS_HOST,REDIS_PORT,REDIS_DB)
-
-#BROKER_URL = os.environ.get('BROKER_URL',None)
-if BROKER_URL == None:
- BROKER_URL = CELERY_RESULT_BACKEND
+RQ_QUEUES = {
+ 'default': {
+ 'URL': os.getenv('REDIS_URL', 'redis://redis/0'),
+ }
+}
+
+RQ = {
+ 'host': 'redis',
+ 'db': 0,
+}
+
+
+# background tasks
+BACKGROUND_TASK_RUN_ASYNC = True
diff --git a/shub/urls.py b/shub/urls.py
index 3580ac6b..304fff13 100644
--- a/shub/urls.py
+++ b/shub/urls.py
@@ -10,40 +10,48 @@
from importlib import import_module
-from django.conf.urls import include, url
+from django.conf import settings
+from django.conf.urls import (include, url)
+from django.contrib import admin
+from django.contrib.sitemaps.views import sitemap, index
+from shub.apps.api import urls as api_urls
from shub.apps.base import urls as base_urls
+from shub.apps.library import urls as library_urls
from shub.apps.main import urls as main_urls
from shub.apps.users import urls as user_urls
-from shub.apps.api import urls as api_urls
-
-from django.contrib import admin
-from django.contrib.sitemaps.views import sitemap, index
+from shub.apps.base.sitemap import (
+ CollectionSitemap,
+ ContainerSitemap
+)
-from django.conf import settings
+from rest_framework.schemas import get_schema_view
+from rest_framework.documentation import include_docs_urls
+# Documentation URL
+API_TITLE = 'Singularity Registry API'
+API_DESCRIPTION = 'Open Source Container Registry API'
+schema_view = get_schema_view(title=API_TITLE)
# Configure custom error pages
-from django.conf.urls import ( handler404, handler500 )
handler404 = 'shub.apps.base.views.handler404'
handler500 = 'shub.apps.base.views.handler500'
# Sitemaps
-from shub.apps.base.sitemap import (
- CollectionSitemap,
- ContainerSitemap
-)
-
-sitemaps = {"containers":ContainerSitemap,
- "collections":CollectionSitemap}
+sitemaps = {"containers": ContainerSitemap,
+ "collections": CollectionSitemap}
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include(base_urls)),
url(r'^api/', include(api_urls)),
+ url(r'^v1/', include(library_urls)), # Sylabs library API
+ url(r'^api/schema/$', schema_view),
+ url(r'^api/docs/', include_docs_urls(title=API_TITLE, description=API_DESCRIPTION)),
url(r'^', include(main_urls)),
url(r'^', include(user_urls)),
url(r'^sitemap\.xml$', index, {'sitemaps': sitemaps}, name="sitemap"),
url(r'^sitemap-(?P.+)\.xml$', sitemap, {'sitemaps': sitemaps}),
+ url(r'^django-rq/', include('django_rq.urls'))
]
# Load URLs for any enabled plugins