diff --git a/doc/release-notes/6784-mdc-scripts.md b/doc/release-notes/6784-mdc-scripts.md
new file mode 100644
index 00000000000..1d913d51c87
--- /dev/null
+++ b/doc/release-notes/6784-mdc-scripts.md
@@ -0,0 +1 @@
+In the "Notes for Dataverse Installation Administrators" section, we should mention the new scripts for MDC.
\ No newline at end of file
diff --git a/doc/release-notes/7140-google-cloud.md b/doc/release-notes/7140-google-cloud.md
new file mode 100644
index 00000000000..62aef73acd0
--- /dev/null
+++ b/doc/release-notes/7140-google-cloud.md
@@ -0,0 +1,12 @@
+## Google Cloud Archiver
+
+Dataverse Bags can now be sent to a bucket in Google Cloud, including buckets in the 'Coldline' storage class, which provides less expensive but slower access.
+
+## Use Cases
+
+- As an Administrator I can set up a regular export to Google Cloud so that my users' data is preserved.
+
+## New Settings
+
+:GoogleCloudProject - the name of the project managing the bucket.
+:GoogleCloudBucket - the name of the bucket to use.
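+
+These can be set with the standard settings API, for example (the project and bucket names below are illustrative; see the Installation Guide for details):
+
+```shell
+curl http://localhost:8080/api/admin/settings/:GoogleCloudProject -X PUT -d "qdr-project"
+curl http://localhost:8080/api/admin/settings/:GoogleCloudBucket -X PUT -d "qdr-archive"
+```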
\ No newline at end of file
diff --git a/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml b/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml
index 7db432c4bbc..4f338905751 100644
--- a/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml
+++ b/doc/sphinx-guides/source/_static/admin/counter-processor-config.yaml
@@ -2,7 +2,7 @@
# 4-digit year and 2-digit month and day
# /usr/local/payara5/glassfish/domains/domain1/logs/counter_2019-01-11.log
#log_name_pattern: sample_logs/counter_(yyyy-mm-dd).log
-log_name_pattern: /usr/local/payara5/glassfish/domains/domain1/logs/counter_(yyyy-mm-dd).log
+log_name_pattern: /usr/local/payara5/glassfish/domains/domain1/logs/mdc/counter_(yyyy-mm-dd).log
# path_types regular expressions allow matching to classify page urls as either an investigation or request
# based on specific URL structure for your system.
diff --git a/doc/sphinx-guides/source/_static/util/counter_daily.sh b/doc/sphinx-guides/source/_static/util/counter_daily.sh
new file mode 100644
index 00000000000..597ff0ac737
--- /dev/null
+++ b/doc/sphinx-guides/source/_static/util/counter_daily.sh
@@ -0,0 +1,36 @@
+#! /bin/bash
+
+COUNTER_PROCESSOR_DIRECTORY="/usr/local/counter-processor-0.0.1"
+MDC_LOG_DIRECTORY="/usr/local/payara5/glassfish/domains/domain1/logs/mdc"
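+# Adjust these paths to match your Counter Processor installation and your :MDCLogPath setting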
+
+# counter_daily.sh
+
+cd $COUNTER_PROCESSOR_DIRECTORY
+
+echo >>/tmp/counter_daily.log
+date >>/tmp/counter_daily.log
+echo >>/tmp/counter_daily.log
+
+# "You should run Counter Processor once a day to create reports in SUSHI (JSON) format that are saved to disk for Dataverse to process and that are sent to the DataCite hub."
+
+LAST=$(date -d "yesterday 13:00" '+%Y-%m-%d')
+# echo $LAST
+YEAR_MONTH=$(date -d "yesterday 13:00" '+%Y-%m')
+# echo $YEAR_MONTH
+d=$(date -I -d "$YEAR_MONTH-01")
+#echo $d
+while [ "$(date -d "$d" +%Y%m%d)" -le "$(date -d "$LAST" +%Y%m%d)" ];
+do
+  # Create an empty log file for any day of the month that doesn't yet have one
+  if [ ! -f "$MDC_LOG_DIRECTORY/counter_$d.log" ]; then
+    touch "$MDC_LOG_DIRECTORY/counter_$d.log"
+  fi
+ d=$(date -I -d "$d + 1 day")
+done
+
+#run counter-processor as counter user
+
+sudo -u counter YEAR_MONTH=$YEAR_MONTH python3 main.py >>/tmp/counter_daily.log
+
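+# POST the generated SUSHI report to Dataverse; the path below must match the report location configured for Counter Processor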
+curl -X POST "http://localhost:8080/api/admin/makeDataCount/addUsageMetricsFromSushiReport?reportOnDisk=/tmp/make-data-count-report.json"
diff --git a/doc/sphinx-guides/source/_static/util/counter_weekly.sh b/doc/sphinx-guides/source/_static/util/counter_weekly.sh
new file mode 100644
index 00000000000..7b63567cf7c
--- /dev/null
+++ b/doc/sphinx-guides/source/_static/util/counter_weekly.sh
@@ -0,0 +1,48 @@
+#!/bin/sh
+#counter_weekly.sh
+
+# This script iterates through all published Datasets in all Dataverses and calls the Make Data Count API to update their citations from DataCite
+# Note: Requires curl and jq for parsing JSON responses from curl
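+# It is intended to be run periodically (e.g. weekly via cron) and assumes Dataverse is listening on localhost:8080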
+
+# A recursive method to process each Dataverse
+processDV () {
+echo "Processing Dataverse ID#: $1"
+
+#Call the Dataverse API to get the contents of the Dataverse (without credentials, this will only list published datasets and dataverses)
+DVCONTENTS=$(curl -s http://localhost:8080/api/dataverses/$1/contents)
+
+# Iterate over all datasets, pulling the value of their DOIs (as part of the persistentUrl) from the json returned
+for subds in $(echo "${DVCONTENTS}" | jq -r '.data[] | select(.type == "dataset") | .persistentUrl'); do
+
+#The authority/identifier are preceded by a protocol/host, i.e. https://doi.org/
+DOI=$(expr "$subds" : '.*:\/\/doi\.org\/\(.*\)')
+
+# Call the Dataverse API for this dataset and get the response
+RESULT=$(curl -s -X POST "http://localhost:8080/api/admin/makeDataCount/:persistentId/updateCitationsForDataset?persistentId=doi:$DOI" )
+# Parse the status and number of citations found from the response
+STATUS=$(echo "$RESULT" | jq -j '.status' )
+CITATIONS=$(echo "$RESULT" | jq -j '.data.citationCount')
+
+# The status for a call that worked
+OK='OK'
+
+# Check the status and report
+if [ "$STATUS" = "$OK" ]; then
+ echo "Updated: $CITATIONS citations for doi:$DOI"
+else
+ echo "Failed to update citations for doi:$DOI"
+ echo "Run curl -s -X POST 'http://localhost:8080/api/admin/makeDataCount/:persistentId/updateCitationsForDataset?persistentId=doi:$DOI ' to retry/see the error message"
+fi
+#processDV $subds
+done
+
+# Now iterate over any child Dataverses and recursively process them
+for subdv in $(echo "${DVCONTENTS}" | jq -r '.data[] | select(.type == "dataverse") | .id'); do
+echo $subdv
+processDV $subdv
+done
+
+}
+
+# Call the function on the root dataverse to start processing
+processDV 1
diff --git a/doc/sphinx-guides/source/admin/make-data-count.rst b/doc/sphinx-guides/source/admin/make-data-count.rst
index 132af23418a..a432a27fbe7 100644
--- a/doc/sphinx-guides/source/admin/make-data-count.rst
+++ b/doc/sphinx-guides/source/admin/make-data-count.rst
@@ -72,6 +72,8 @@ Enable or Disable Display of Make Data Count Metrics
By default, when MDC logging is enabled (when ``:MDCLogPath`` is set), Dataverse will display MDC metrics instead of its internal (legacy) metrics. You can avoid this (e.g. to collect MDC metrics for some period of time before starting to display them) by setting ``:DisplayMDCMetrics`` to false.
+The following discussion assumes ``:MDCLogPath`` has been set to ``/usr/local/payara5/glassfish/domains/domain1/logs/mdc``.
+
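+``:MDCLogPath`` is a regular database setting and can be set with the admin API; a sketch, assuming Dataverse is listening on localhost:8080:
+
+``curl -X PUT -d '/usr/local/payara5/glassfish/domains/domain1/logs/mdc' http://localhost:8080/api/admin/settings/:MDCLogPath``
+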
Configure Counter Processor
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -92,7 +94,7 @@ Configure Counter Processor
Populate Views and Downloads for the First Time
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Soon we will be setting up a cron job to run nightly but we start with a single successful configuration and run of Counter Processor and calls to Dataverse APIs.
+Soon we will be setting up a cron job to run nightly, but we start with a single successful configuration and manual run of Counter Processor, followed by calls to the Dataverse APIs. (The scripts discussed in the next section automate the steps described here, including creating empty log files if you're starting mid-month.)
* Change to the directory where you installed Counter Processor.
@@ -100,7 +102,7 @@ Soon we will be setting up a cron job to run nightly but we start with a single
* If you are running Counter Processor for the first time in the middle of a month, you will need to create blank log files for the previous days, e.g.:
- * ``cd /usr/local/payara5/glassfish/domains/domain1/logs``
+ * ``cd /usr/local/payara5/glassfish/domains/domain1/logs/mdc``
* ``touch counter_2019-02-01.log``
@@ -127,6 +129,8 @@ Populate Views and Downloads Nightly
Running ``main.py`` to create the SUSHI JSON file and the subsequent calling of the Dataverse API to process it should be added as a cron job.
+Dataverse provides example scripts that automate these steps: ``counter_daily.sh`` processes new accesses and uploads and updates Dataverse's database, and ``counter_weekly.sh`` retrieves citations for all Datasets from DataCite. These scripts should be configured for your environment and can be run manually or as cron jobs.
+
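+For example, a crontab sketch (the schedules and script locations are illustrative; adjust them for your installation)::
+
+   # Nightly at 1:00: process yesterday's MDC logs and POST the SUSHI report to Dataverse
+   0 1 * * * /path/to/counter_daily.sh >> /tmp/counter_daily_cron.log 2>&1
+   # Weekly on Sunday at 2:00: update citation counts for all Datasets from DataCite
+   0 2 * * 0 /path/to/counter_weekly.sh >> /tmp/counter_weekly_cron.log 2>&1
+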
Sending Usage Metrics to the DataCite Hub
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/sphinx-guides/source/installation/config.rst b/doc/sphinx-guides/source/installation/config.rst
index ddd5305cf21..58c625e0845 100644
--- a/doc/sphinx-guides/source/installation/config.rst
+++ b/doc/sphinx-guides/source/installation/config.rst
@@ -776,6 +776,8 @@ For Google Analytics, the example script at :download:`analytics-code.html`
[...]
...a `BagIt`_ serialized `OAI-ORE`_ map file, which is also available as a metadata export format in the Dataverse web interface.
-At present, the DPNSubmitToArchiveCommand and LocalSubmitToArchiveCommand are the only implementations extending the AbstractSubmitToArchiveCommand and using the configurable mechanisms discussed below.
+At present, the DPNSubmitToArchiveCommand, LocalSubmitToArchiveCommand, and GoogleCloudSubmitToArchiveCommand are the only implementations extending the AbstractSubmitToArchiveCommand and using the configurable mechanisms discussed below.
.. _Duracloud Configuration:
@@ -831,10 +833,41 @@ ArchiverClassName - the fully qualified class to be used for archiving. For exam
\:ArchiverSettings - the archiver class can access required settings including existing Dataverse settings and dynamically defined ones specific to the class. This setting is a comma-separated list of those settings. For example\:
-``curl http://localhost:8080/api/admin/settings/:ArchiverSettings -X PUT -d ":BagItLocalPath”``
+``curl http://localhost:8080/api/admin/settings/:ArchiverSettings -X PUT -d ":BagItLocalPath"``
:BagItLocalPath is the file path that you've set in :ArchiverSettings.
+.. _Google Cloud Configuration:
+
+Google Cloud Configuration
+++++++++++++++++++++++++++
+
+The Google Cloud Archiver can send Dataverse Bags to a bucket in Google's cloud, including buckets in the 'Coldline' storage class (cheaper, with slower access).
+
+``curl http://localhost:8080/api/admin/settings/:ArchiverClassName -X PUT -d "edu.harvard.iq.dataverse.engine.command.impl.GoogleCloudSubmitToArchiveCommand"``
+
+``curl http://localhost:8080/api/admin/settings/:ArchiverSettings -X PUT -d ":GoogleCloudBucket, :GoogleCloudProject"``
+
+The Google Cloud Archiver defines two custom settings, both of which are required. The credentials for your account, in the form of a JSON key file, must also be obtained and stored locally (see below).
+
+In order to use the Google Cloud Archiver, you must have a Google account. You will need to create a project and bucket within that account and provide those values in the settings:
+
+\:GoogleCloudBucket - the name of the bucket to use. For example:
+
+``curl http://localhost:8080/api/admin/settings/:GoogleCloudBucket -X PUT -d "qdr-archive"``
+
+\:GoogleCloudProject - the name of the project managing the bucket. For example:
+
+``curl http://localhost:8080/api/admin/settings/:GoogleCloudProject -X PUT -d "qdr-project"``
+
+The Google Cloud Archiver also requires a key file that must be renamed to 'googlecloudkey.json' and placed in the directory identified by your 'dataverse.files.directory' JVM option. This file can be created in the Google Cloud Console. (One method: navigate to your project's 'Settings'/'Service Accounts', create an account, give it the 'Cloud Storage'/'Storage Admin' role, and once it is created, use the 'Actions' menu to 'Create Key', selecting the 'JSON' format option. Use the downloaded file as the 'googlecloudkey.json' file.)
+
+For example:
+
+``cp <your key file> /usr/local/payara5/glassfish/domains/domain1/files/googlecloudkey.json``
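+
+Depending on your installation, you may also need to make sure the key file is owned by, and only readable by, the user running the application server. A sketch (the ``payara`` user name is an assumption; adjust it to your installation):
+
+``sudo chown payara /usr/local/payara5/glassfish/domains/domain1/files/googlecloudkey.json && sudo chmod 600 /usr/local/payara5/glassfish/domains/domain1/files/googlecloudkey.json``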
+
+.. _Archiving API Call:
+
API Call
++++++++
@@ -2124,3 +2157,40 @@ To enable redirects to the zipper installed on the same server as the main Datav
To enable redirects to the zipper on a different server:
``curl -X PUT -d 'https://zipper.example.edu/cgi-bin/zipdownload' http://localhost:8080/api/admin/settings/:CustomZipDownloadServiceUrl``
+
+:ArchiverClassName
+++++++++++++++++++
+
+Dataverse can export archival "Bag" files to an extensible set of storage systems (see :ref:`BagIt Export` above for details about this and for further explanation of the other archiving related settings below).
+This setting specifies which storage system to use by identifying the particular Java class that should be run. Current options include DuraCloudSubmitToArchiveCommand, LocalSubmitToArchiveCommand, and GoogleCloudSubmitToArchiveCommand.
+
+``curl -X PUT -d 'edu.harvard.iq.dataverse.engine.command.impl.LocalSubmitToArchiveCommand' http://localhost:8080/api/admin/settings/:ArchiverClassName``
+
+:ArchiverSettings
++++++++++++++++++
+
+Each Archiver class may have its own custom settings. Along with setting which Archiver class to use, one must use this setting to identify which setting values should be sent to it when it is invoked. The value should be a comma-separated list of setting names.
+For example, the LocalSubmitToArchiveCommand only uses the :BagItLocalPath setting. To allow the class to use that setting, this setting must be set as:
+
+``curl -X PUT -d ':BagItLocalPath' http://localhost:8080/api/admin/settings/:ArchiverSettings``
+
+:DuraCloudHost
+++++++++++++++
+:DuraCloudPort
+++++++++++++++
+:DuraCloudContext
++++++++++++++++++
+
+These three settings define the host, port, and context used by the DuraCloudSubmitToArchiveCommand. :DuraCloudHost is required. The other settings have default values as noted in the :ref:`Duracloud Configuration` section above.
+
+:BagItLocalPath
++++++++++++++++
+
+This is the local file system path to be used with the LocalSubmitToArchiveCommand class. It is recommended to use an absolute path. See the :ref:`Local Path Configuration` section above.
+
+:GoogleCloudBucket
+++++++++++++++++++
+:GoogleCloudProject
++++++++++++++++++++
+
+These are the bucket and project names to be used with the GoogleCloudSubmitToArchiveCommand class. Further information is in the :ref:`Google Cloud Configuration` section above.
diff --git a/pom.xml b/pom.xml
index 36756abdf4f..0812b78a778 100644
--- a/pom.xml
+++ b/pom.xml
@@ -30,8 +30,8 @@
        <aws.version>1.11.762</aws.version>
        <commons.logging.version>1.2</commons.logging.version>
        <httpcomponents.client.version>4.5.5</httpcomponents.client.version>
-        <junit.version>4.12</junit.version>
-        <junit.jupiter.version>5.5.2</junit.jupiter.version>
+        <junit.version>4.13.1</junit.version>
+        <junit.jupiter.version>5.7.0</junit.jupiter.version>
        <junit.vintage.version>${junit.jupiter.version}</junit.vintage.version>
        <testcontainers.version>1.13.0</testcontainers.version>
        <mockito.version>2.28.2</mockito.version>
@@ -57,7 +57,7 @@
-
@@ -127,6 +127,13 @@
                <artifactId>httpclient</artifactId>
                <version>${httpcomponents.client.version}</version>
            </dependency>
+            <dependency>
+                <groupId>com.google.cloud</groupId>
+                <artifactId>google-cloud-bom</artifactId>
+                <version>0.115.0-alpha</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
            <dependency>
                <groupId>org.testcontainers</groupId>
                <artifactId>testcontainers-bom</artifactId>
@@ -137,7 +144,7 @@
@@ -440,11 +447,6 @@
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.6.1</version>
        </dependency>
-        <dependency>
-            <groupId>axis</groupId>
-            <artifactId>axis</artifactId>
-            <version>1.4</version>
-        </dependency>
        <dependency>
            <groupId>io.searchbox</groupId>
            <artifactId>jest</artifactId>
@@ -573,7 +575,7 @@
        <dependency>
            <groupId>org.apache.tika</groupId>
            <artifactId>tika-parsers</artifactId>
-            <version>1.22</version>
+            <version>1.24.1</version>
@@ -581,6 +583,11 @@
            <artifactId>opennlp-tools</artifactId>
            <version>1.9.1</version>
        </dependency>
+        <dependency>
+            <groupId>com.google.cloud</groupId>
+            <artifactId>google-cloud-storage</artifactId>
+            <version>1.97.0</version>
+        </dependency>
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java
index 65d26d2eb63..a9a16e60ae2 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFileServiceBean.java
@@ -578,7 +578,7 @@ public void findFileMetadataOptimizedExperimental(Dataset owner, DatasetVersion
int i = 0;
- List