Merge pull request #68 from inbo/add_printouts_to_upload_files_direct
Add printouts to upload files direct
SanderDevisscher authored Jan 3, 2024
2 parents 90a28d9 + f5790c4 commit 7090670
Showing 4 changed files with 39 additions and 58 deletions.
25 changes: 1 addition & 24 deletions .github/workflows/upload_files_direct.yaml
@@ -86,27 +86,4 @@ jobs:
AWS_SESSION_TOKEN: ${{ steps.assume_role.outputs.AWS_SESSION_TOKEN }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
S3_BUCKET: ${{ secrets.S3_BUCKET }}

- name: Commit and push changes
uses: devops-infra/action-commit-push@master
with:
github_token: ${{ secrets.AUTOMATISATION }}
commit_prefix: "[AUTO]"
commit_message: "upload files direct to UAT"
target_branch: upload files to UAT
add_timestamp: true

- name: Get branch name
run: |
git branch --show-current
- name: Create pull request
uses: devops-infra/action-pull-request@v0.4.2
with:
github_token: ${{ secrets.AUTOMATISATION }}
target_branch: uat
title: "[AUTO] upload files direct to UAT"
template: .github/PR_upload_files_to_UAT.md
reviewer: SanderDevisscher
label: automated workflow
get_diff: false

28 changes: 3 additions & 25 deletions .github/workflows/upload_files_processing.yaml
@@ -50,7 +50,7 @@ jobs:
sudo apt install --yes libharfbuzz-dev libfribidi-dev
R --no-save -e 'install.packages("devtools")'
R --no-save -e 'devtools::install_github("inbo/INBOtheme@v0.5.9", force = TRUE)'
R --no-save -e 'devtools::install_github("inbo/alien-species-portal@main",
R --no-save -e 'devtools::install_github("inbo/alien-species-portal@sprint_v0.0.4",
subdir = "alienSpecies", force = TRUE)'
- name: Install R packages
@@ -84,28 +84,6 @@ jobs:
AWS_ACCESS_KEY_ID: ${{ steps.assume_role.outputs.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ steps.assume_role.outputs.AWS_SECRET_ACCESS_KEY }}
AWS_SESSION_TOKEN: ${{ steps.assume_role.outputs.AWS_SESSION_TOKEN }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
S3_BUCKET: ${{ secrets.S3_BUCKET }}

- name: Commit and push changes
uses: devops-infra/action-commit-push@master
with:
github_token: ${{ secrets.AUTOMATISATION }}
commit_prefix: "[AUTO]"
commit_message: "upload processed files to UAT"
target_branch: upload files to UAT
add_timestamp: true

- name: Get branch name
run: |
git branch --show-current
- name: Create pull request
uses: devops-infra/action-pull-request@v0.4.2
with:
github_token: ${{ secrets.AUTOMATISATION }}
target_branch: uat
title: "[AUTO] upload processed files to UAT"
template: .github/PR_upload_files_to_UAT.md
reviewer: SanderDevisscher
label: automated workflow
get_diff: false

18 changes: 14 additions & 4 deletions src/upload_direct_to_UAT.R
@@ -7,27 +7,37 @@
#' needed to access the bucket.

# Libraries ####
print("libraries")
library(aws.s3)
library(testthat)

directFilePath <- "./data/output/UAT_direct"

# connect to bucket ####
source("./src/connect_to_bucket.R")

# run this code when you run this script locally
# print("source connect_to_bucket.R")
# source("./src/connect_to_bucket.R")
# connect_to_bucket(Sys.getenv("UAT_BUCKET"))
# get feedback ####

bucket <- Sys.getenv("S3_bucket")
# test S3_bucket ####
print("test S3_bucket")
if(Sys.getenv("S3_BUCKET") == ""){
stop("S3_bucket is not provided")
}

bucket <- paste0("s3://",Sys.getenv("S3_BUCKET"))
# bucket <- config::get("bucket", file = system.file("config.yml", package = "alienSpecies"))

print("get_bucket_df")
bucket_df <- get_bucket_df(bucket, region = "eu-west-1")
# test uploaded files ####
# A placeholder for an alienSpecies function to test the files on the bucket.


# files that are currently in management need to be uploaded to the bucket
directFiles <- c("Oxyura_jamaicensis.csv", "Lithobates_catesbeianus.csv", "Ondatra_zibethicus.csv", "translations.csv")

print("lapply put_object")
lapply(directFiles, function(fileName){

put_object(file.path(directFilePath, fileName),
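The remainder of the put_object call is collapsed in this view; a minimal sketch of what the upload loop presumably does, reusing the paths and file names defined above (the object and region arguments are assumptions, not the committed code):

# Sketch only, assuming the AWS credentials are already exported by the workflow
library(aws.s3)

directFilePath <- "./data/output/UAT_direct"
bucket <- paste0("s3://", Sys.getenv("S3_BUCKET"))
directFiles <- c("Oxyura_jamaicensis.csv", "Lithobates_catesbeianus.csv",
                 "Ondatra_zibethicus.csv", "translations.csv")

lapply(directFiles, function(fileName) {
  # upload each file to the bucket root, keeping its original file name
  put_object(file.path(directFilePath, fileName),
             object = fileName,
             bucket = bucket,
             region = "eu-west-1")
})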
26 changes: 21 additions & 5 deletions src/upload_processing_to_UAT.R
@@ -18,12 +18,17 @@ library(testthat)
processingFilePath <- "./data/output/UAT_processing"

# connect to bucket ####
source("./src/connect_to_bucket.R")
#source("./src/connect_to_bucket.R")

bucket <- Sys.getenv("UAT_bucket")
Sys.setenv("AWS_DEFAULT_REGION" = "eu-west-1")
print("test S3_bucket")
if(Sys.getenv("S3_BUCKET") == ""){
stop("S3_bucket is not provided")
}

connect_to_bucket(bucket)
bucket <- paste0("s3://",Sys.getenv("S3_BUCKET"))
#Sys.setenv("AWS_DEFAULT_REGION" = "eu-west-1")

#connect_to_bucket(bucket) #=> run this before continuing locally

###############################################################
## The following create* functions will take input data,
@@ -33,45 +38,56 @@ connect_to_bucket(bucket)

# input: folder grid containing gewestbel shape data
# output: grid.RData
print("grid")
createShapeData(dataDir = file.path(processingFilePath, "grid"), bucket = bucket)

# input: "Vespa_velutina_shape" folder containing shape data
# output: Vespa_velutina_shape.RData
print("Vespa velutina")
createShapeData(dataDir = file.path(processingFilePath,"Vespa_velutina_shape"), bucket = bucket)

# input: folder occurrenceCube containing be_1km and be_20 km shape data
# output: occurrenceCube.RData
print("occurrenceCube")
createShapeData(dataDir = file.path(processingFilePath,"occurrenceCube"), bucket = bucket)

# output: provinces.RData
print("provinces")
createShapeData(dataDir = file.path(processingFilePath,"provinces.geojson"), bucket = bucket)

# output: communes.RData
print("communes")
createShapeData(dataDir = file.path(processingFilePath,"communes.geojson"), bucket = bucket)

# create key data
# input: "be_alientaxa_info.csv"
# output: "keys.csv"
print("key data")
createKeyData(dataDir = processingFilePath, bucket = bucket)

# create occupancy cube

# input: trendOccupancy folder containing T1* and ias_belgium_t0_2016/18/20 geojson data
# output: dfCube.RData
print("dfcube")
createOccupancyCube(file.path(processingFilePath, "trendOccupancy"), bucket = bucket)

# create tabular data
# input: data_input_checklist_indicators.tsv/eu_concern_species.tsv/be_alientaxa_cube.csv
# output: "eu_concern_species_processed.RData"/"data_input_checklist_indicators_processed.RData"/ "be_alientaxa_cube_processed.RData"

print("tabular data")
print("indicators")
createTabularData(dataDir = processingFilePath, type = "indicators", bucket = bucket)
print("unionlist")
createTabularData(dataDir = processingFilePath, type = "unionlist", bucket = bucket)
print("occurrence")
createTabularData(dataDir = processingFilePath, type = "occurrence", bucket = bucket)

###################################################
# test if all the data files needed are on bucket #
# and can be read into R #
###################################################
print("tests")

test_that("Load shape data", {

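The test bodies are collapsed in this view; a minimal sketch of one such check, assuming bucket is the s3:// URI built at the top of the script and using grid.RData purely as an illustration of the outputs listed earlier:

# Sketch only: verify a processed file is on the bucket and can be loaded into R
library(aws.s3)
library(testthat)

test_that("Load shape data", {
  expect_true(object_exists("grid.RData", bucket = bucket))
  expect_silent(s3load("grid.RData", bucket = bucket, envir = new.env()))
})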
