diff --git a/dsub/_dsub_version.py b/dsub/_dsub_version.py
index 71bbde4..050f045 100644
--- a/dsub/_dsub_version.py
+++ b/dsub/_dsub_version.py
@@ -27,5 +27,5 @@
 """

-DSUB_VERSION = '0.5.2.dev0'
+DSUB_VERSION = '0.5.2'
diff --git a/test/integration/e2e_logging_paths.sh b/test/integration/e2e_logging_paths.sh
index 7cac80c..314cfd5 100755
--- a/test/integration/e2e_logging_paths.sh
+++ b/test/integration/e2e_logging_paths.sh
@@ -27,7 +27,7 @@ readonly LOGGING_BASE="$(dirname "${LOGGING}")"
 declare LOGGING_OVERRIDE

 readonly JOB_NAME="log-test"
-readonly JOB_USER="${USER}"
+readonly JOB_USER="${USER:-$(whoami)}"

 # Test a basic job with base logging path
 echo "Subtest #1: Basic logging path"
diff --git a/test/integration/e2e_logging_paths_pattern_tasks.sh b/test/integration/e2e_logging_paths_pattern_tasks.sh
index 0150fc0..5524c01 100755
--- a/test/integration/e2e_logging_paths_pattern_tasks.sh
+++ b/test/integration/e2e_logging_paths_pattern_tasks.sh
@@ -31,7 +31,7 @@ readonly LOGGING_BASE="$(dirname "${LOGGING}")"
 declare LOGGING_OVERRIDE

 readonly JOB_NAME=$(logging_paths_tasks_setup::get_job_name)
-readonly JOB_USER="${USER}"
+readonly JOB_USER="${USER:-$(whoami)}"

 # Set up the tasks file
 logging_paths_tasks_setup::write_tasks_file
diff --git a/test/integration/io_setup.sh b/test/integration/io_setup.sh
index 79d2a24..6cf7caf 100644
--- a/test/integration/io_setup.sh
+++ b/test/integration/io_setup.sh
@@ -26,6 +26,8 @@ readonly INPUT_BAM_MD5="4afb9b8908959dbd4e2d5c54bf254c93"
 readonly REQUESTER_PAYS_INPUT_BAM_FULL_PATH="gs://${DSUB_BUCKET_REQUESTER_PAYS}/${INPUT_BAM_FILE}"
 readonly REQUESTER_PAYS_POPULATION_FILE_FULL_PATH="gs://${DSUB_BUCKET_REQUESTER_PAYS}/${POPULATION_FILE}"

+# Set user variable like in other tests
+readonly JOB_USER="${USER:-$(whoami)}"
 # This is the image we use to test the PD mount feature.
 # Inject the TEST_TOKEN into the name so that multiple tests can run
 # concurrently. Since the image test can be run multiple times for one
@@ -230,7 +232,7 @@ function io_setup::check_dstat() {
   local dstat_output=$(run_dstat --status '*' --jobs "${job_id}" --full)

   echo " Checking user-id"
-  util::dstat_yaml_assert_field_equal "${dstat_output}" "[0].user-id" "${USER}"
+  util::dstat_yaml_assert_field_equal "${dstat_output}" "[0].user-id" "${JOB_USER}"

   echo " Checking logging"
   util::dstat_yaml_assert_field_equal "${dstat_output}" "[0].logging" "${LOGGING}"
diff --git a/test/integration/io_tasks_setup.sh b/test/integration/io_tasks_setup.sh
index f44f867..9b93627 100644
--- a/test/integration/io_tasks_setup.sh
+++ b/test/integration/io_tasks_setup.sh
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+readonly JOB_USER="${USER:-$(whoami)}"
 readonly POPULATION_FILE="gs://genomics-public-data/ftp-trace.ncbi.nih.gov/1000genomes/ftp/20131219.superpopulations.tsv"
 readonly POPULATION_MD5="68a73f849b82071afe11888bac1aa8a7"

@@ -119,7 +120,7 @@ function io_tasks_setup::check_dstat() {
     echo " Check task ${task_id}"

     echo " Checking user-id"
-    util::dstat_yaml_assert_field_equal "${dstat_output}" "[0].user-id" "${USER}"
+    util::dstat_yaml_assert_field_equal "${dstat_output}" "[0].user-id" "${JOB_USER}"

     echo " Checking logging"
     util::dstat_yaml_assert_field_equal "${dstat_output}" "[0].logging" "${LOGGING}/${job_id}.${task_id}.log"
diff --git a/test/integration/test_setup.sh b/test/integration/test_setup.sh
index e60f78a..769e0f6 100644
--- a/test/integration/test_setup.sh
+++ b/test/integration/test_setup.sh
@@ -30,6 +30,9 @@
 # * Provide functions run_dsub, run_dstat, run_ddel which will call a function
 #   with DSUB_PROVIDER-specific default parameters set.

+# Set default USER if not already set (needed for Jupyterlab/Docker environments)
+export USER="${USER:-$(whoami)}"
+
 # If the DSUB_PROVIDER is not set, figure it out from the name of the script.
 # If the script name is <provider>.<test>.sh, pull out the provider.
 # If the script name is <test>.sh, use "local".
@@ -89,16 +92,23 @@ function run_dsub() {
 }

 function dsub_google-batch() {
-  local location="${LOCATION:-}"
+  # Use REGIONS env var if set, otherwise fall back to LOCATION
+  local location="${LOCATION:-${REGIONS:-}}"
+
+  # Use environment variables for VPC-SC configuration if set
+  local network="${GPU_NETWORK:-global/networks/default}"
+  local subnetwork="${GPU_SUBNETWORK:-regions/us-central1/subnetworks/default}"
+  local service_account="${PET_SA_EMAIL:-}"

   dsub \
     --provider google-batch \
     --project "${PROJECT_ID}" \
     ${location:+--location "${location}"} \
     --logging "${LOGGING_OVERRIDE:-${LOGGING}}" \
-    --network "global/networks/default" \
-    --subnetwork "regions/us-central1/subnetworks/default" \
+    --network "${network}" \
+    --subnetwork "${subnetwork}" \
     --use-private-address \
+    ${service_account:+--service-account "${service_account}"} \
     "${@}"
 }
diff --git a/test/integration/test_setup_e2e.py b/test/integration/test_setup_e2e.py
index 8f01a5f..898d878 100644
--- a/test/integration/test_setup_e2e.py
+++ b/test/integration/test_setup_e2e.py
@@ -171,16 +171,27 @@ def dsub_google_batch(dsub_args):
     if val:
       opt_args.append(var[1], val)

-  # pyformat: disable
-  return dsub_command.call([
+  # Use environment variables for VPC-SC configuration if set
+  network = os.environ.get("GPU_NETWORK", "global/networks/default")
+  subnetwork = os.environ.get("GPU_SUBNETWORK",
+                              "regions/us-central1/subnetworks/default")
+  location = os.environ.get("LOCATION", os.environ.get("REGIONS", "us-central1"))
+  service_account = os.environ.get("PET_SA_EMAIL", "")
+
+  args = [
       "--provider", "google-batch",
       "--project", PROJECT_ID,
       "--logging", LOGGING,
-      "--regions", "us-central1",
-      "--network", "global/networks/default",
-      "--subnetwork", "regions/us-central1/subnetworks/default",
+      "--regions", location,
+      "--network", network,
+      "--subnetwork", subnetwork,
       "--use-private-address"
-  ] + opt_args + dsub_args)
+  ]
+  if service_account:
+    args.extend(["--service-account", service_account])
+
+  # pyformat: disable
+  return dsub_command.call(args + opt_args + dsub_args)
   # pyformat: enable