From 26da7a7d4c1f5db2d2c3b2faedccbd9899c14a47 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Mon, 16 Nov 2020 15:49:29 -0800 Subject: [PATCH 01/34] feat: add get_custom_job and get_hyperparameter_tuning_job samples (#68) * feat: add get_custom_job and get_hyperparameter_tuning_job samples --- samples/snippets/get_custom_job_sample.py | 36 ++++++++++++++++++ .../snippets/get_custom_job_sample_test.py | 30 +++++++++++++++ .../get_hyperparameter_tuning_job_sample.py | 38 +++++++++++++++++++ ...t_hyperparameter_tuning_job_sample_test.py | 32 ++++++++++++++++ 4 files changed, 136 insertions(+) create mode 100644 samples/snippets/get_custom_job_sample.py create mode 100644 samples/snippets/get_custom_job_sample_test.py create mode 100644 samples/snippets/get_hyperparameter_tuning_job_sample.py create mode 100644 samples/snippets/get_hyperparameter_tuning_job_sample_test.py diff --git a/samples/snippets/get_custom_job_sample.py b/samples/snippets/get_custom_job_sample.py new file mode 100644 index 0000000000..4fcce9aa16 --- /dev/null +++ b/samples/snippets/get_custom_job_sample.py @@ -0,0 +1,36 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_get_custom_job_sample] +from google.cloud import aiplatform + + +def get_custom_job_sample( + project: str, + custom_job_id: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.JobServiceClient(client_options=client_options) + name = client.custom_job_path( + project=project, location=location, custom_job=custom_job_id + ) + response = client.get_custom_job(name=name) + print("response:", response) + + +# [END aiplatform_get_custom_job_sample] diff --git a/samples/snippets/get_custom_job_sample_test.py b/samples/snippets/get_custom_job_sample_test.py new file mode 100644 index 0000000000..5e11f2d8da --- /dev/null +++ b/samples/snippets/get_custom_job_sample_test.py @@ -0,0 +1,30 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import os + +import get_custom_job_sample + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +CUSTOM_JOB_ID = "7980906305281851392" +KNOWN_CUSTOM_JOB = f"/locations/us-central1/customJobs/{CUSTOM_JOB_ID}" + + +def test_ucaip_generated_get_custom_job_sample(capsys): + get_custom_job_sample.get_custom_job_sample( + project=PROJECT_ID, custom_job_id=CUSTOM_JOB_ID + ) + out, _ = capsys.readouterr() + assert KNOWN_CUSTOM_JOB in out diff --git a/samples/snippets/get_hyperparameter_tuning_job_sample.py b/samples/snippets/get_hyperparameter_tuning_job_sample.py new file mode 100644 index 0000000000..a9378533a1 --- /dev/null +++ b/samples/snippets/get_hyperparameter_tuning_job_sample.py @@ -0,0 +1,38 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_get_hyperparameter_tuning_job_sample] +from google.cloud import aiplatform + + +def get_hyperparameter_tuning_job_sample( + project: str, + hyperparameter_tuning_job_id: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.JobServiceClient(client_options=client_options) + name = client.hyperparameter_tuning_job_path( + project=project, + location=location, + hyperparameter_tuning_job=hyperparameter_tuning_job_id, + ) + response = client.get_hyperparameter_tuning_job(name=name) + print("response:", response) + + +# [END aiplatform_get_hyperparameter_tuning_job_sample] diff --git a/samples/snippets/get_hyperparameter_tuning_job_sample_test.py b/samples/snippets/get_hyperparameter_tuning_job_sample_test.py new file mode 100644 index 0000000000..839d056b43 --- /dev/null +++ b/samples/snippets/get_hyperparameter_tuning_job_sample_test.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import os + +import get_hyperparameter_tuning_job_sample + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +HYPERPARAMETER_TUNING_JOB_ID = "2216298782247616512" +KNOWN_HYPERPARAMETER_TUNING_JOB = ( + f"/locations/us-central1/hyperparameterTuningJobs/{HYPERPARAMETER_TUNING_JOB_ID}" +) + + +def test_ucaip_generated_get_hyperparameter_tuning_job_sample(capsys): + get_hyperparameter_tuning_job_sample.get_hyperparameter_tuning_job_sample( + project=PROJECT_ID, hyperparameter_tuning_job_id=HYPERPARAMETER_TUNING_JOB_ID + ) + out, _ = capsys.readouterr() + assert KNOWN_HYPERPARAMETER_TUNING_JOB in out From 1d71652e94ad48ec99151bd109d4ad5273e253ce Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 17 Nov 2020 02:15:23 +0100 Subject: [PATCH 02/34] chore(deps): update dependency google-cloud-aiplatform to v0.3.1 (#74) --- samples/snippets/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/requirements.txt b/samples/snippets/requirements.txt index c448d3d434..27bb30c3d0 100644 --- a/samples/snippets/requirements.txt +++ b/samples/snippets/requirements.txt @@ -1,3 +1,3 @@ pytest==6.0.1 google-cloud-storage>=1.26.0, <2.0.0dev -google-cloud-aiplatform==0.3.0 +google-cloud-aiplatform==0.3.1 From f79c0f4d182c7d6d60a3ac614b8a81f86168a69a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 17 Nov 2020 03:58:25 +0100 Subject: [PATCH 03/34] chore(deps): update dependency pytest to v6.1.2 (#65) --- samples/snippets/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/requirements.txt b/samples/snippets/requirements.txt index 27bb30c3d0..e9b3e8a556 100644 --- a/samples/snippets/requirements.txt +++ b/samples/snippets/requirements.txt @@ -1,3 +1,3 @@ -pytest==6.0.1 +pytest==6.1.2 google-cloud-storage>=1.26.0, <2.0.0dev google-cloud-aiplatform==0.3.1 From 7daacd576dc96149c05e2908f276831337076316 Mon Sep 17 00:00:00 2001 From: Morgan Du Date: Tue, 
17 Nov 2020 11:50:02 -0800 Subject: [PATCH 04/34] feat: add data_labeling samples (#78) --- ...ata_labeling_job_active_learning_sample.py | 60 ++++++++++++ ..._data_labeling_job_active_learning_test.py | 93 ++++++++++++++++++ ..._labeling_job_image_segmentation_sample.py | 58 +++++++++++ ...ta_labeling_job_image_segmentation_test.py | 96 ++++++++++++++++++ ...ata_labeling_job_specialist_pool_sample.py | 60 ++++++++++++ ..._data_labeling_job_specialist_pool_test.py | 98 +++++++++++++++++++ 6 files changed, 465 insertions(+) create mode 100644 samples/snippets/create_data_labeling_job_active_learning_sample.py create mode 100644 samples/snippets/create_data_labeling_job_active_learning_test.py create mode 100644 samples/snippets/create_data_labeling_job_image_segmentation_sample.py create mode 100644 samples/snippets/create_data_labeling_job_image_segmentation_test.py create mode 100644 samples/snippets/create_data_labeling_job_specialist_pool_sample.py create mode 100644 samples/snippets/create_data_labeling_job_specialist_pool_test.py diff --git a/samples/snippets/create_data_labeling_job_active_learning_sample.py b/samples/snippets/create_data_labeling_job_active_learning_sample.py new file mode 100644 index 0000000000..86360b7b34 --- /dev/null +++ b/samples/snippets/create_data_labeling_job_active_learning_sample.py @@ -0,0 +1,60 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_create_data_labeling_job_active_learning_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_data_labeling_job_active_learning_sample( + project: str, + display_name: str, + dataset: str, + instruction_uri: str, + inputs_schema_uri: str, + annotation_spec: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.JobServiceClient(client_options=client_options) + inputs_dict = {"annotation_specs": [annotation_spec]} + inputs = json_format.ParseDict(inputs_dict, Value()) + + active_learning_config = {"max_data_item_count": 1} + + data_labeling_job = { + "display_name": display_name, + # Full resource name: projects/{project}/locations/{location}/datasets/{dataset_id} + "datasets": [dataset], + "labeler_count": 1, + "instruction_uri": instruction_uri, + "inputs_schema_uri": inputs_schema_uri, + "inputs": inputs, + "annotation_labels": { + "aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_active_learning" + }, + "active_learning_config": active_learning_config, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_data_labeling_job( + parent=parent, data_labeling_job=data_labeling_job + ) + print("response:", response) + + +# [END aiplatform_create_data_labeling_job_active_learning_sample] diff --git a/samples/snippets/create_data_labeling_job_active_learning_test.py b/samples/snippets/create_data_labeling_job_active_learning_test.py new file mode 100644 index 0000000000..38a7c0c1c0 --- /dev/null +++ b/samples/snippets/create_data_labeling_job_active_learning_test.py @@ -0,0 +1,93 @@ +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import os +import uuid +from google.cloud import aiplatform + +import helpers + +import create_data_labeling_job_active_learning_sample + +API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +DATASET_ID = "1905673553261363200" +INPUTS_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml" +DISPLAY_NAME = f"temp_create_data_labeling_job_active_learning_test_{uuid.uuid4()}" + +INSTRUCTIONS_GCS_URI = ( + "gs://ucaip-sample-resources/images/datalabeling_instructions.pdf" +) +ANNOTATION_SPEC = "rose" + + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def job_client(): + client_options = {"api_endpoint": API_ENDPOINT} + job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) + yield job_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(capsys, shared_state, job_client): + yield + + job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"]) + + # Verify Data Labelling Job is cancelled, or timeout after 400 seconds + helpers.wait_for_job_state( + get_job_method=job_client.get_data_labeling_job, + name=shared_state["data_labeling_job_name"], + timeout=400, + freq=10, + ) + + # Delete the data labeling job + response = job_client.delete_data_labeling_job( + 
name=shared_state["data_labeling_job_name"] + ) + + print("Delete LRO:", response.operation.name) + delete_data_labeling_job_response = response.result(timeout=300) + print("delete_data_labeling_job_response", delete_data_labeling_job_response) + + out, _ = capsys.readouterr() + assert "delete_data_labeling_job_response" in out + + +# Creating a data labeling job for images +def test_create_data_labeling_job_active_learning_sample(capsys, shared_state): + + create_data_labeling_job_active_learning_sample.create_data_labeling_job_active_learning_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + dataset=f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}", + instruction_uri=INSTRUCTIONS_GCS_URI, + inputs_schema_uri=INPUTS_SCHEMA_URI, + annotation_spec=ANNOTATION_SPEC, + api_endpoint=API_ENDPOINT, + ) + + out, _ = capsys.readouterr() + + # Save resource name of the newly created data labeing job + shared_state["data_labeling_job_name"] = helpers.get_name(out) diff --git a/samples/snippets/create_data_labeling_job_image_segmentation_sample.py b/samples/snippets/create_data_labeling_job_image_segmentation_sample.py new file mode 100644 index 0000000000..94a80b9dd8 --- /dev/null +++ b/samples/snippets/create_data_labeling_job_image_segmentation_sample.py @@ -0,0 +1,58 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_create_data_labeling_job_image_segmentation_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_data_labeling_job_image_segmentation_sample( + project: str, + display_name: str, + dataset: str, + instruction_uri: str, + inputs_schema_uri: str, + annotation_spec: dict, + annotation_set_name: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.JobServiceClient(client_options=client_options) + inputs_dict = {"annotationSpecColors": [annotation_spec]} + inputs = json_format.ParseDict(inputs_dict, Value()) + + data_labeling_job = { + "display_name": display_name, + # Full resource name: projects/{project}/locations/{location}/datasets/{dataset_id} + "datasets": [dataset], + "labeler_count": 1, + "instruction_uri": instruction_uri, + "inputs_schema_uri": inputs_schema_uri, + "inputs": inputs, + "annotation_labels": { + "aiplatform.googleapis.com/annotation_set_name": annotation_set_name + }, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_data_labeling_job( + parent=parent, data_labeling_job=data_labeling_job + ) + print("response:", response) + + +# [END aiplatform_create_data_labeling_job_image_segmentation_sample] diff --git a/samples/snippets/create_data_labeling_job_image_segmentation_test.py b/samples/snippets/create_data_labeling_job_image_segmentation_test.py new file mode 100644 index 0000000000..79f40d949c --- /dev/null +++ b/samples/snippets/create_data_labeling_job_image_segmentation_test.py @@ -0,0 +1,96 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import os +import uuid +from google.cloud import aiplatform + +import helpers + +import create_data_labeling_job_image_segmentation_sample + +API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +DATASET_ID = "5111009432972558336" +INPUTS_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_segmentation_1.0.0.yaml" +DISPLAY_NAME = f"temp_create_data_labeling_job_image_segmentation_test_{uuid.uuid4()}" + +INSTRUCTIONS_GCS_URI = ( + "gs://ucaip-sample-resources/images/datalabeling_instructions.pdf" +) +ANNOTATION_SPEC = {"color": {"red": 1.0}, "displayName": "rose"} +ANNOTATION_SET_NAME = f"temp_image_segmentation_{uuid.uuid4()}" + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def job_client(): + client_options = {"api_endpoint": API_ENDPOINT} + job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) + yield job_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(capsys, shared_state, job_client): + yield + + job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"]) + + # Verify Data Labelling Job is cancelled, or timeout after 400 seconds + helpers.wait_for_job_state( + get_job_method=job_client.get_data_labeling_job, + name=shared_state["data_labeling_job_name"], + timeout=400, + freq=10, + ) + + # Delete the data labeling job + response = job_client.delete_data_labeling_job( 
+ name=shared_state["data_labeling_job_name"] + ) + + print("Delete LRO:", response.operation.name) + delete_data_labeling_job_response = response.result(timeout=300) + print("delete_data_labeling_job_response", delete_data_labeling_job_response) + + out, _ = capsys.readouterr() + assert "delete_data_labeling_job_response" in out + + +# Creating a data labeling job for images +def test_create_data_labeling_job_image_segmentation_sample(capsys, shared_state): + + dataset = f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}" + + create_data_labeling_job_image_segmentation_sample.create_data_labeling_job_image_segmentation_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + dataset=dataset, + instruction_uri=INSTRUCTIONS_GCS_URI, + inputs_schema_uri=INPUTS_SCHEMA_URI, + annotation_spec=ANNOTATION_SPEC, + annotation_set_name=ANNOTATION_SET_NAME, + api_endpoint=API_ENDPOINT, + ) + + out, _ = capsys.readouterr() + + # Save resource name of the newly created data labeing job + shared_state["data_labeling_job_name"] = helpers.get_name(out) diff --git a/samples/snippets/create_data_labeling_job_specialist_pool_sample.py b/samples/snippets/create_data_labeling_job_specialist_pool_sample.py new file mode 100644 index 0000000000..5cbded1fea --- /dev/null +++ b/samples/snippets/create_data_labeling_job_specialist_pool_sample.py @@ -0,0 +1,60 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_create_data_labeling_job_specialist_pool_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_data_labeling_job_specialist_pool_sample( + project: str, + display_name: str, + dataset: str, + specialist_pool: str, + instruction_uri: str, + inputs_schema_uri: str, + annotation_spec: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.JobServiceClient(client_options=client_options) + inputs_dict = {"annotation_specs": [annotation_spec]} + inputs = json_format.ParseDict(inputs_dict, Value()) + + data_labeling_job = { + "display_name": display_name, + # Full resource name: projects/{project}/locations/{location}/datasets/{dataset_id} + "datasets": [dataset], + "labeler_count": 1, + "instruction_uri": instruction_uri, + "inputs_schema_uri": inputs_schema_uri, + "inputs": inputs, + "annotation_labels": { + "aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_specialist_pool" + }, + # Full resource name: projects/{project}/locations/{location}/specialistPools/{specialist_pool_id} + "specialist_pools": [specialist_pool], + } + parent = f"projects/{project}/locations/{location}" + response = client.create_data_labeling_job( + parent=parent, data_labeling_job=data_labeling_job + ) + print("response:", response) + + +# [END aiplatform_create_data_labeling_job_specialist_pool_sample] diff --git a/samples/snippets/create_data_labeling_job_specialist_pool_test.py b/samples/snippets/create_data_labeling_job_specialist_pool_test.py new file mode 100644 index 0000000000..8936fa6776 --- /dev/null +++ b/samples/snippets/create_data_labeling_job_specialist_pool_test.py @@ 
-0,0 +1,98 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import os +import uuid +from google.cloud import aiplatform + +import helpers + +import create_data_labeling_job_specialist_pool_sample + +API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +DATASET_ID = "1905673553261363200" +SPECIALIST_POOL_ID = "5898026661995085824" +INPUTS_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml" +DISPLAY_NAME = f"temp_create_data_labeling_job_specialist_pool_test_{uuid.uuid4()}" + +INSTRUCTIONS_GCS_URI = ( + "gs://ucaip-sample-resources/images/datalabeling_instructions.pdf" +) +ANNOTATION_SPEC = "rose" + + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def job_client(): + client_options = {"api_endpoint": API_ENDPOINT} + job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) + yield job_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(capsys, shared_state, job_client): + yield + + job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"]) + + # Verify Data Labelling Job is cancelled, or timeout after 400 seconds + helpers.wait_for_job_state( + get_job_method=job_client.get_data_labeling_job, + name=shared_state["data_labeling_job_name"], + timeout=400, + freq=10, + ) + + # Delete 
the data labeling job + response = job_client.delete_data_labeling_job( + name=shared_state["data_labeling_job_name"] + ) + + print("Delete LRO:", response.operation.name) + delete_data_labeling_job_response = response.result(timeout=300) + print("delete_data_labeling_job_response", delete_data_labeling_job_response) + + out, _ = capsys.readouterr() + assert "delete_data_labeling_job_response" in out + + +# Creating a data labeling job for images +def test_create_data_labeling_job_specialist_pool_sample(capsys, shared_state): + + dataset = f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}" + specialist_pool = f"projects/{PROJECT_ID}/locations/{LOCATION}/specialistPools/{SPECIALIST_POOL_ID}" + + create_data_labeling_job_specialist_pool_sample.create_data_labeling_job_specialist_pool_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + dataset=dataset, + specialist_pool=specialist_pool, + instruction_uri=INSTRUCTIONS_GCS_URI, + inputs_schema_uri=INPUTS_SCHEMA_URI, + annotation_spec=ANNOTATION_SPEC, + api_endpoint=API_ENDPOINT, + ) + + out, _ = capsys.readouterr() + + # Save resource name of the newly created data labeing job + shared_state["data_labeling_job_name"] = helpers.get_name(out) From fb165b3632119b361a1936f367128f7146b49685 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 17 Nov 2020 16:17:17 -0800 Subject: [PATCH 05/34] feat: add custom_job samples (#69) --- ...ate_training_pipeline_custom_job_sample.py | 76 ++++++++++++++++ ...raining_pipeline_custom_job_sample_test.py | 79 +++++++++++++++++ ...eploy_model_custom_trained_model_sample.py | 62 +++++++++++++ ..._model_custom_trained_model_sample_test.py | 88 +++++++++++++++++++ 4 files changed, 305 insertions(+) create mode 100644 samples/snippets/create_training_pipeline_custom_job_sample.py create mode 100644 samples/snippets/create_training_pipeline_custom_job_sample_test.py create mode 100644 samples/snippets/deploy_model_custom_trained_model_sample.py create mode 100644 
samples/snippets/deploy_model_custom_trained_model_sample_test.py diff --git a/samples/snippets/create_training_pipeline_custom_job_sample.py b/samples/snippets/create_training_pipeline_custom_job_sample.py new file mode 100644 index 0000000000..b8918f5b09 --- /dev/null +++ b/samples/snippets/create_training_pipeline_custom_job_sample.py @@ -0,0 +1,76 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_create_training_pipeline_custom_job_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_training_pipeline_custom_job_sample( + project: str, + display_name: str, + model_display_name: str, + container_image_uri: str, + base_output_directory_prefix: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) + + training_task_inputs_dict = { + "workerPoolSpecs": [ + { + "replicaCount": 1, + "machineSpec": {"machineType": "n1-standard-4"}, + "containerSpec": { + # A working docker image can be found at gs://cloud-samples-data/ai-platform/mnist_tfrecord/custom_job + "imageUri": container_image_uri, + "args": [ + # AIP_MODEL_DIR is set by the service according to baseOutputDirectory. + "--model_dir=$(AIP_MODEL_DIR)", + ], + }, + } + ], + "baseOutputDirectory": { + # The GCS location for outputs must be accessible by the project's AI Platform service account. + "output_uri_prefix": base_output_directory_prefix + }, + } + training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + + training_task_definition = "gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml" + image_uri = "gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest" + + training_pipeline = { + "display_name": display_name, + "training_task_definition": training_task_definition, + "training_task_inputs": training_task_inputs, + "model_to_upload": { + "display_name": model_display_name, + "container_spec": {"image_uri": image_uri,}, + }, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_training_pipeline( + parent=parent, training_pipeline=training_pipeline + ) + print("response:", response) + + +# [END aiplatform_create_training_pipeline_custom_job_sample] diff --git a/samples/snippets/create_training_pipeline_custom_job_sample_test.py b/samples/snippets/create_training_pipeline_custom_job_sample_test.py new file mode 100644 index 0000000000..fc593a5d44 --- /dev/null +++ b/samples/snippets/create_training_pipeline_custom_job_sample_test.py @@ -0,0 +1,79 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from uuid import uuid4 +import pytest +import os + +import helpers + +import create_training_pipeline_custom_job_sample + +from google.cloud import aiplatform + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +DISPLAY_NAME = f"temp_create_training_pipeline_custom_job_test_{uuid4()}" + + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def pipeline_client(): + pipeline_client = aiplatform.gapic.PipelineServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + return pipeline_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, pipeline_client): + yield + + training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] + + pipeline_client.cancel_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + + # Waiting for training pipeline to be in CANCELLED state + helpers.wait_for_job_state( + get_job_method=pipeline_client.get_training_pipeline, + name=shared_state["training_pipeline_name"], + ) + + # Delete the training pipeline + pipeline_client.delete_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + + +def test_ucaip_generated_create_training_pipeline_custom_job_sample(capsys, shared_state): + + create_training_pipeline_custom_job_sample.create_training_pipeline_custom_job_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + model_display_name=f"Temp Model for {DISPLAY_NAME}", + container_image_uri='gcr.io/ucaip-sample-tests/mnist-custom-job:latest', + 
base_output_directory_prefix='gs://ucaip-samples-us-central1/training_pipeline_output' + ) + + out, _ = capsys.readouterr() + assert "response:" in out + + # Save resource name of the newly created training pipeline + shared_state["training_pipeline_name"] = helpers.get_name(out) diff --git a/samples/snippets/deploy_model_custom_trained_model_sample.py b/samples/snippets/deploy_model_custom_trained_model_sample.py new file mode 100644 index 0000000000..439bdc802e --- /dev/null +++ b/samples/snippets/deploy_model_custom_trained_model_sample.py @@ -0,0 +1,62 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_deploy_model_custom_trained_model_sample] +from google.cloud import aiplatform + + +def deploy_model_custom_trained_model_sample( + project: str, + endpoint_id: str, + model_name: str, + deployed_model_display_name: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", + timeout: int = 7200, +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.EndpointServiceClient(client_options=client_options) + deployed_model = { + # format: 'projects/{project}/locations/{location}/models/{model}' + "model": model_name, + "display_name": deployed_model_display_name, + # `dedicated_resources` must be used for non-AutoML models + "dedicated_resources": { + "min_replica_count": 1, + "machine_spec": { + "machine_type": "n1-standard-2", + # Accelerators can be used only if the model specifies a GPU image. + # 'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + # 'accelerator_count': 1, + }, + }, + } + # key '0' assigns traffic for the newly deployed model + # Traffic percentage values must add up to 100 + # Leave dictionary empty if endpoint should not accept any traffic + traffic_split = {"0": 100} + endpoint = client.endpoint_path( + project=project, location=location, endpoint=endpoint_id + ) + response = client.deploy_model( + endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split + ) + print("Long running operation:", response.operation.name) + deploy_model_response = response.result(timeout=timeout) + print("deploy_model_response:", deploy_model_response) + + +# [END aiplatform_deploy_model_custom_trained_model_sample] diff --git a/samples/snippets/deploy_model_custom_trained_model_sample_test.py b/samples/snippets/deploy_model_custom_trained_model_sample_test.py new file mode 100644 index 0000000000..49f096fdb6 --- /dev/null +++ b/samples/snippets/deploy_model_custom_trained_model_sample_test.py @@ -0,0 +1,88 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud import aiplatform +import deploy_model_custom_trained_model_sample + +from uuid import uuid4 +import pytest +import os + +import helpers + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +PARENT = f"projects/{PROJECT_ID}/locations/{LOCATION}" +DISPLAY_NAME = f"temp_deploy_model_custom_trained_model_test_{uuid4()}" + +# Resource Name of "permanent_custom_mnist_model" +MODEL_NAME = "projects/580378083368/locations/us-central1/models/4992732768149438464" + + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def endpoint_client(): + client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} + endpoint_client = aiplatform.gapic.EndpointServiceClient( + client_options=client_options + ) + return endpoint_client + + +@pytest.fixture(scope="function", autouse=True) +def setup(shared_state, endpoint_client): + create_endpoint_response = endpoint_client.create_endpoint( + parent=PARENT, endpoint={"display_name": DISPLAY_NAME} + ) + shared_state["endpoint"] = create_endpoint_response.result().name + + +def test_ucaip_generated_deploy_model_custom_trained_model_sample(capsys, shared_state): + + assert shared_state["endpoint"] is not None + + # Deploy existing image classification model to endpoint + deploy_model_custom_trained_model_sample.deploy_model_custom_trained_model_sample( + project=PROJECT_ID, + model_name=MODEL_NAME, + deployed_model_display_name=DISPLAY_NAME, + endpoint_id=shared_state["endpoint"].split("/")[-1], + ) + + # Store deployed model ID for 
undeploying + out, _ = capsys.readouterr() + assert "deploy_model_response" in out + + shared_state["deployed_model_id"] = helpers.get_name(out=out, key="id") + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, endpoint_client): + yield + + undeploy_model_operation = endpoint_client.undeploy_model( + deployed_model_id=shared_state["deployed_model_id"], + endpoint=shared_state["endpoint"], + ) + undeploy_model_operation.result() + + # Delete the endpoint + endpoint_client.delete_endpoint( + name=shared_state["endpoint"] + ) From 5155dee5edd86fb700a91dfca01bddd4d6393410 Mon Sep 17 00:00:00 2001 From: Morgan Du Date: Wed, 18 Nov 2020 10:59:22 -0800 Subject: [PATCH 06/34] feat: add create_hyperparameter_tuning_job_python_package sample (#76) --- ...ameter_tuning_job_python_package_sample.py | 101 ++++++++++++++++++ ...r_tuning_job_python_package_sample_test.py | 85 +++++++++++++++ 2 files changed, 186 insertions(+) create mode 100644 samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py create mode 100644 samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py diff --git a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py new file mode 100644 index 0000000000..8ffe7cd9b8 --- /dev/null +++ b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py @@ -0,0 +1,101 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_create_hyperparameter_tuning_job_python_package_sample] +from google.cloud import aiplatform + + +def create_hyperparameter_tuning_job_python_package_sample( + project: str, + display_name: str, + executor_image_uri: str, + package_uri: str, + python_module: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.JobServiceClient(client_options=client_options) + + # study_spec + metric = { + "metric_id": "val_rmse", + "goal": aiplatform.gapic.StudySpec.MetricSpec.GoalType.MINIMIZE, + } + + conditional_parameter_decay = { + "parameter_spec": { + "parameter_id": "decay", + "double_value_spec": {"min_value": 1e-07, "max_value": 1}, + "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE, + }, + "parent_discrete_values": {"values": [32, 64]}, + } + conditional_parameter_learning_rate = { + "parameter_spec": { + "parameter_id": "learning_rate", + "double_value_spec": {"min_value": 1e-07, "max_value": 1}, + "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE, + }, + "parent_discrete_values": {"values": [4, 8, 16]}, + } + parameter = { + "parameter_id": "batch_size", + "discrete_value_spec": {"values": [4, 8, 16, 32, 64, 128]}, + "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE, + "conditional_parameter_specs": [ + conditional_parameter_decay, + conditional_parameter_learning_rate, + ], + } + + # trial_job_spec + machine_spec = { + "machine_type": "n1-standard-4", + "accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + "accelerator_count": 1, + } + 
worker_pool_spec = { + "machine_spec": machine_spec, + "replica_count": 1, + "python_package_spec": { + "executor_image_uri": executor_image_uri, + "package_uris": [package_uri], + "python_module": python_module, + "args": [], + }, + } + + # hyperparameter_tuning_job + hyperparameter_tuning_job = { + "display_name": display_name, + "max_trial_count": 4, + "parallel_trial_count": 2, + "study_spec": { + "metrics": [metric], + "parameters": [parameter], + "algorithm": aiplatform.gapic.StudySpec.Algorithm.RANDOM_SEARCH, + }, + "trial_job_spec": {"worker_pool_specs": [worker_pool_spec]}, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_hyperparameter_tuning_job( + parent=parent, hyperparameter_tuning_job=hyperparameter_tuning_job + ) + print("response:", response) + + +# [END aiplatform_create_hyperparameter_tuning_job_python_package_sample] diff --git a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py new file mode 100644 index 0000000000..4a328f5170 --- /dev/null +++ b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py @@ -0,0 +1,85 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid +import pytest +import os + +import helpers + +import create_hyperparameter_tuning_job_python_package_sample + +from google.cloud import aiplatform + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") + +API_ENDPOINT = "us-central1-aiplatform.googleapis.com" + +DISPLAY_NAME = ( + f"temp_create_hyperparameter_tuning_job_python_package_test_{uuid.uuid4()}" +) + +EXECUTOR_IMAGE_URI = "us.gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest" +PACKAGE_URI = "gs://ucaip-test-us-central1/training/pythonpackages/trainer.tar.bz2" +PYTHON_MODULE = "trainer.hptuning_trainer" + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def job_client(): + client_options = {"api_endpoint": API_ENDPOINT} + job_client = aiplatform.gapic.JobServiceClient( + client_options=client_options) + yield job_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, job_client): + yield + + # Cancel the created hyperparameter tuning job + job_client.cancel_hyperparameter_tuning_job( + name=shared_state["hyperparameter_tuning_job_name"] + ) + + # Waiting for hyperparameter tuning job to be in CANCELLED state + helpers.wait_for_job_state( + get_job_method=job_client.get_hyperparameter_tuning_job, + name=shared_state["hyperparameter_tuning_job_name"], + ) + + # Delete the created hyperparameter tuning job + job_client.delete_hyperparameter_tuning_job( + name=shared_state["hyperparameter_tuning_job_name"] + ) + + +def test_create_hyperparameter_tuning_job_python_package_sample(capsys, shared_state): + + create_hyperparameter_tuning_job_python_package_sample.create_hyperparameter_tuning_job_python_package_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + executor_image_uri=EXECUTOR_IMAGE_URI, + package_uri=PACKAGE_URI, + python_module=PYTHON_MODULE, + ) + + out, _ = capsys.readouterr() + assert "response" in out + + shared_state["hyperparameter_tuning_job_name"] = helpers.get_name(out) From 
4c60ad67dcd9026cb989d6e81dec4813cbae962f Mon Sep 17 00:00:00 2001 From: Morgan Du Date: Wed, 25 Nov 2020 10:32:08 -0800 Subject: [PATCH 07/34] feat: add video action recognition samples (#77) Co-authored-by: Yu-Han Liu --- ...ion_job_video_action_recognition_sample.py | 60 +++++++++++ ...ction_job_video_action_recognition_test.py | 82 ++++++++++++++ ...ipeline_video_action_recognition_sample.py | 54 ++++++++++ ..._pipeline_video_action_recognition_test.py | 100 ++++++++++++++++++ ...t_model_video_action_recognition_sample.py | 44 ++++++++ ...ort_model_video_action_recognition_test.py | 45 ++++++++ ...luation_video_action_recognition_sample.py | 37 +++++++ ...valuation_video_action_recognition_test.py | 30 ++++++ ...rt_data_video_action_recognition_sample.py | 44 ++++++++ ...port_data_video_action_recognition_test.py | 84 +++++++++++++++ 10 files changed, 580 insertions(+) create mode 100644 samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_video_action_recognition_test.py create mode 100644 samples/snippets/create_training_pipeline_video_action_recognition_sample.py create mode 100644 samples/snippets/create_training_pipeline_video_action_recognition_test.py create mode 100644 samples/snippets/export_model_video_action_recognition_sample.py create mode 100644 samples/snippets/export_model_video_action_recognition_test.py create mode 100644 samples/snippets/get_model_evaluation_video_action_recognition_sample.py create mode 100644 samples/snippets/get_model_evaluation_video_action_recognition_test.py create mode 100644 samples/snippets/import_data_video_action_recognition_sample.py create mode 100644 samples/snippets/import_data_video_action_recognition_test.py diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py new file mode 100644 index 
0000000000..e5775d3e36 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py @@ -0,0 +1,60 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_create_batch_prediction_job_video_action_recognition_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_batch_prediction_job_video_action_recognition_sample( + project: str, + display_name: str, + model: str, + gcs_source_uri: str, + gcs_destination_output_uri_prefix: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.JobServiceClient(client_options=client_options) + model_parameters_dict = { + "confidenceThreshold": 0.5, + } + model_parameters = json_format.ParseDict(model_parameters_dict, Value()) + + batch_prediction_job = { + "display_name": display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + "model": model, + "model_parameters": model_parameters, + "input_config": { + "instances_format": "jsonl", + "gcs_source": {"uris": [gcs_source_uri]}, + }, + "output_config": { + "predictions_format": "jsonl", + "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix}, + }, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_batch_prediction_job( + parent=parent, batch_prediction_job=batch_prediction_job + ) + print("response:", response) + + +# [END aiplatform_create_batch_prediction_job_video_action_recognition_sample] diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py new file mode 100644 index 0000000000..7a3e07a082 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py @@ -0,0 +1,82 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import uuid
+import pytest
+import os
+
+import helpers
+
+import create_batch_prediction_job_video_action_recognition_sample
+
+from google.cloud import aiplatform
+
+PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
+LOCATION = "us-central1"
+MODEL_ID = "3530998029718913024"  # permanent_swim_run_videos_action_recognition_model
+DISPLAY_NAME = f"temp_create_batch_prediction_job_video_action_recognition_test_{uuid.uuid4()}"
+GCS_SOURCE_URI = "gs://automl-video-demo-data/ucaip-var/swimrun_bp.jsonl"
+GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/"
+API_ENDPOINT = "us-central1-aiplatform.googleapis.com"
+
+@pytest.fixture
+def shared_state():
+    state = {}
+    yield state
+
+
+@pytest.fixture
+def job_client():
+    client_options = {"api_endpoint": API_ENDPOINT}
+    job_client = aiplatform.gapic.JobServiceClient(
+        client_options=client_options)
+    yield job_client
+
+
+@pytest.fixture(scope="function", autouse=True)
+def teardown(shared_state, job_client):
+    yield
+    job_client.delete_batch_prediction_job(
+        name=shared_state["batch_prediction_job_name"]
+    )
+
+
+# Creating AutoML Video Action Recognition batch prediction job
+def test_create_batch_prediction_job_video_action_recognition_sample(
+    capsys, shared_state, job_client
+):
+
+    model = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"
+
+    create_batch_prediction_job_video_action_recognition_sample.create_batch_prediction_job_video_action_recognition_sample(
+        project=PROJECT_ID,
+        display_name=DISPLAY_NAME,
+        model=model,
+        gcs_source_uri=GCS_SOURCE_URI,
+        gcs_destination_output_uri_prefix=GCS_OUTPUT_URI,
+    )
+
+    out, _ = capsys.readouterr()
+
+    # Save resource name of the newly created batch prediction job
+    shared_state["batch_prediction_job_name"] = helpers.get_name(out)
+
+    # Waiting for batch prediction job to be in SUCCEEDED state
+    helpers.wait_for_job_state(
+        get_job_method=job_client.get_batch_prediction_job,
+        name=shared_state["batch_prediction_job_name"],
+        expected_state="SUCCEEDED",
+        timeout=600,
+        freq=20,
+    )
diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py
new file mode 100644
index 0000000000..aff9f5059b
--- /dev/null
+++ b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py
@@ -0,0 +1,54 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# [START aiplatform_create_training_pipeline_video_action_recognition_sample]
+from google.cloud import aiplatform
+from google.protobuf import json_format
+from google.protobuf.struct_pb2 import Value
+
+
+def create_training_pipeline_video_action_recognition_sample(
+    project: str,
+    display_name: str,
+    dataset_id: str,
+    model_display_name: str,
+    model_type: str,
+    location: str = "us-central1",
+    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
+):
+    client_options = {"api_endpoint": api_endpoint}
+    # Initialize client that will be used to create and send requests.
+    # This client only needs to be created once, and can be reused for multiple requests.
+ client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) + training_task_inputs_dict = { + # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1' + "modelType": model_type + } + training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + + training_pipeline = { + "display_name": display_name, + "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_action_recognition_1.0.0.yaml", + "training_task_inputs": training_task_inputs, + "input_data_config": {"dataset_id": dataset_id}, + "model_to_upload": {"display_name": model_display_name}, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_training_pipeline( + parent=parent, training_pipeline=training_pipeline + ) + print("response:", response) + + +# [END aiplatform_create_training_pipeline_video_action_recognition_sample] diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_test.py b/samples/snippets/create_training_pipeline_video_action_recognition_test.py new file mode 100644 index 0000000000..b443746d67 --- /dev/null +++ b/samples/snippets/create_training_pipeline_video_action_recognition_test.py @@ -0,0 +1,100 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid +import pytest +import os + +import helpers + +import create_training_pipeline_video_action_recognition_sample + +from google.cloud import aiplatform + +LOCATION = "us-central1" +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +DATASET_ID = "6881957627459272704" # permanent_swim_run_videos_action_recognition_dataset +DISPLAY_NAME = f"temp_create_training_pipeline_video_action_recognition_test_{uuid.uuid4()}" +MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}" +MODEL_TYPE = "CLOUD" +API_ENDPOINT = "us-central1-aiplatform.googleapis.com" + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def pipeline_client(): + client_options = {"api_endpoint": API_ENDPOINT} + pipeline_client = aiplatform.gapic.PipelineServiceClient( + client_options=client_options + ) + yield pipeline_client + + +@pytest.fixture +def model_client(): + client_options = {"api_endpoint": API_ENDPOINT} + model_client = aiplatform.gapic.ModelServiceClient( + client_options=client_options) + yield model_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, model_client, pipeline_client): + yield + model_client.delete_model(name=shared_state["model_name"]) + pipeline_client.delete_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + + +# Training AutoML Vision Model +def test_create_training_pipeline_video_action_recognition_sample( + capsys, shared_state, pipeline_client +): + create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + dataset_id=DATASET_ID, + model_display_name=MODEL_DISPLAY_NAME, + model_type=MODEL_TYPE, + ) + + out, _ = capsys.readouterr() + + assert "response:" in out + + # Save resource name of the newly created training pipeline + shared_state["training_pipeline_name"] = helpers.get_name(out) + + # Poll until the pipeline succeeds because we want to test the 
model_upload step as well. + helpers.wait_for_job_state( + get_job_method=pipeline_client.get_training_pipeline, + name=shared_state["training_pipeline_name"], + expected_state="SUCCEEDED", + timeout=5000, + freq=20, + ) + + training_pipeline = pipeline_client.get_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + + # Check that the model indeed has been uploaded. + assert training_pipeline.model_to_upload.name != "" + + shared_state["model_name"] = training_pipeline.model_to_upload.name diff --git a/samples/snippets/export_model_video_action_recognition_sample.py b/samples/snippets/export_model_video_action_recognition_sample.py new file mode 100644 index 0000000000..570f82fba5 --- /dev/null +++ b/samples/snippets/export_model_video_action_recognition_sample.py @@ -0,0 +1,44 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_export_model_video_action_recognition_sample] +from google.cloud import aiplatform + + +def export_model_video_action_recognition_sample( + project: str, + model_id: str, + gcs_destination_output_uri_prefix: str, + export_format: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", + timeout: int = 300, +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.ModelServiceClient(client_options=client_options) + gcs_destination = {"output_uri_prefix": gcs_destination_output_uri_prefix} + output_config = { + "artifact_destination": gcs_destination, + "export_format_id": export_format, + } + name = client.model_path(project=project, location=location, model=model_id) + response = client.export_model(name=name, output_config=output_config) + print("Long running operation:", response.operation.name) + export_model_response = response.result(timeout=timeout) + print("export_model_response:", export_model_response) + + +# [END aiplatform_export_model_video_action_recognition_sample] diff --git a/samples/snippets/export_model_video_action_recognition_test.py b/samples/snippets/export_model_video_action_recognition_test.py new file mode 100644 index 0000000000..543be7dc47 --- /dev/null +++ b/samples/snippets/export_model_video_action_recognition_test.py @@ -0,0 +1,45 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import os + +import export_model_video_action_recognition_sample +from google.cloud import storage + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +MODEL_ID = "3422489426196955136" # permanent_swim_run_videos_action_recognition_edge_model +GCS_URI = "gs://ucaip-samples-test-output/tmp/export_model_video_action_recognition_sample" +EXPORT_FORMAT = "tf-saved-model" + +@pytest.fixture(scope="function", autouse=True) +def teardown(): + yield + + storage_client = storage.Client() + bucket = storage_client.get_bucket("ucaip-samples-test-output") + blobs = bucket.list_blobs(prefix="tmp/export_model_video_action_recognition_sample") + for blob in blobs: + blob.delete() + + +def test_export_model_video_action_recognition_sample(capsys): + export_model_video_action_recognition_sample.export_model_video_action_recognition_sample( + project=PROJECT_ID, + model_id=MODEL_ID, + gcs_destination_output_uri_prefix=GCS_URI, + export_format=EXPORT_FORMAT, + ) + out, _ = capsys.readouterr() + assert "export_model_response" in out diff --git a/samples/snippets/get_model_evaluation_video_action_recognition_sample.py b/samples/snippets/get_model_evaluation_video_action_recognition_sample.py new file mode 100644 index 0000000000..10fde4d286 --- /dev/null +++ b/samples/snippets/get_model_evaluation_video_action_recognition_sample.py @@ -0,0 +1,37 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_get_model_evaluation_video_action_recognition_sample] +from google.cloud import aiplatform + + +def get_model_evaluation_video_action_recognition_sample( + project: str, + model_id: str, + evaluation_id: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.ModelServiceClient(client_options=client_options) + name = client.model_evaluation_path( + project=project, location=location, model=model_id, evaluation=evaluation_id + ) + response = client.get_model_evaluation(name=name) + print("response:", response) + + +# [END aiplatform_get_model_evaluation_video_action_recognition_sample] diff --git a/samples/snippets/get_model_evaluation_video_action_recognition_test.py b/samples/snippets/get_model_evaluation_video_action_recognition_test.py new file mode 100644 index 0000000000..973987e086 --- /dev/null +++ b/samples/snippets/get_model_evaluation_video_action_recognition_test.py @@ -0,0 +1,30 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import pytest
import os

# BUG FIX: this test previously imported get_model_evaluation_video_object_tracking_sample,
# a module that does not exist in this change set, so the test failed at
# collection time with ImportError. It must exercise the video *action
# recognition* sample created alongside it.
import get_model_evaluation_video_action_recognition_sample

PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
MODEL_ID = "3530998029718913024"  # permanent_swim_run_videos_action_recognition_model
EVALUATION_ID = "305008923591573504"  # evaluation of the permanent model above


def test_ucaip_generated_get_model_evaluation_sample(capsys):
    """Fetch the permanent evaluation and verify its metrics are printed."""
    get_model_evaluation_video_action_recognition_sample.get_model_evaluation_video_action_recognition_sample(
        project=PROJECT_ID, model_id=MODEL_ID, evaluation_id=EVALUATION_ID
    )
    out, _ = capsys.readouterr()
    assert "metrics_schema_uri" in out


# [START aiplatform_import_data_video_action_recognition_sample]
from google.cloud import aiplatform


def import_data_video_action_recognition_sample(
    project: str,
    dataset_id: str,
    gcs_source_uri: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
    timeout: int = 1800,
):
    """Import video action recognition data from GCS into a managed dataset.

    Blocks up to `timeout` seconds on the import long-running operation.
    """
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.DatasetServiceClient(client_options=client_options)
    import_configs = [
        {
            "gcs_source": {"uris": [gcs_source_uri]},
            "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/video_action_recognition_io_format_1.0.0.yaml",
        }
    ]
    name = client.dataset_path(project=project, location=location, dataset=dataset_id)
    response = client.import_data(name=name, import_configs=import_configs)
    print("Long running operation:", response.operation.name)
    # Wait for the import LRO to finish before returning.
    import_data_response = response.result(timeout=timeout)
    print("import_data_response:", import_data_response)


# [END aiplatform_import_data_video_action_recognition_sample]
import pytest
import os

import uuid
from google.cloud import aiplatform

import import_data_video_action_recognition_sample


PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
GCS_SOURCE = "gs://automl-video-demo-data/ucaip-var/swimrun.jsonl"
METADATA_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml"

API_ENDPOINT = "us-central1-aiplatform.googleapis.com"
DISPLAY_NAME = f"temp_import_data_video_action_recognition_test_{uuid.uuid4()}"


@pytest.fixture
def shared_state():
    # Mutable dict used to hand resource ids from the test to teardown.
    shared_state = {}
    yield shared_state


@pytest.fixture
def dataset_client():
    client_options = {"api_endpoint": API_ENDPOINT}
    dataset_client = aiplatform.gapic.DatasetServiceClient(
        client_options=client_options
    )
    yield dataset_client


@pytest.fixture(scope="function", autouse=True)
def teardown(shared_state, dataset_client):
    yield
    # If dataset creation failed there is nothing to delete; returning here
    # keeps a KeyError in teardown from masking the real test failure.
    if "dataset_id" not in shared_state:
        return
    dataset_name = dataset_client.dataset_path(
        project=PROJECT_ID, location=LOCATION, dataset=shared_state["dataset_id"]
    )
    response = dataset_client.delete_dataset(name=dataset_name)
    # Block until the delete LRO finishes so the project is left clean
    # (the previous unused `delete_dataset_response = ...` binding is gone).
    response.result(timeout=120)


def test_import_data_video_action_recognition_sample(
    capsys, shared_state, dataset_client
):
    # Create a temporary dataset to import into.
    dataset = aiplatform.gapic.Dataset(
        display_name=DISPLAY_NAME, metadata_schema_uri=METADATA_SCHEMA_URI,
    )

    response = dataset_client.create_dataset(
        parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset
    )

    create_dataset_response = response.result(timeout=120)

    shared_state["dataset_name"] = create_dataset_response.name
    shared_state["dataset_id"] = create_dataset_response.name.split("/")[-1]

    import_data_video_action_recognition_sample.import_data_video_action_recognition_sample(
        project=PROJECT_ID,
        dataset_id=shared_state["dataset_id"],
        gcs_source_uri=GCS_SOURCE,
    )
    out, _ = capsys.readouterr()

    assert "import_data_response" in out
b012283c08cf8abc2974dc73ff7c2d3b8112a16b Mon Sep 17 00:00:00 2001 From: Morgan Du Date: Wed, 25 Nov 2020 11:51:00 -0800 Subject: [PATCH 08/34] feat: add create_training_pipeline_custom_training_managed_dataset sample (#75) Co-authored-by: Yu-Han Liu --- ..._custom_training_managed_dataset_sample.py | 103 +++++++++++++++++ ...om_training_managed_dataset_sample_test.py | 107 ++++++++++++++++++ 2 files changed, 210 insertions(+) create mode 100644 samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py create mode 100644 samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py diff --git a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py new file mode 100644 index 0000000000..3638b53abc --- /dev/null +++ b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py @@ -0,0 +1,103 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# [START aiplatform_create_training_pipeline_custom_training_managed_dataset_sample]
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value


def create_training_pipeline_custom_training_managed_dataset_sample(
    project: str,
    display_name: str,
    model_display_name: str,
    dataset_id: str,
    annotation_schema_uri: str,
    training_container_spec_image_uri: str,
    model_container_spec_image_uri: str,
    base_output_uri_prefix: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Create a training pipeline that runs a custom container on a managed dataset."""
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for
    # multiple requests.
    client = aiplatform.gapic.PipelineServiceClient(
        client_options={"api_endpoint": api_endpoint}
    )

    # Where the managed dataset is read from and training output is written.
    input_data_config = {
        "dataset_id": dataset_id,
        "annotation_schema_uri": annotation_schema_uri,
        "gcs_destination": {"output_uri_prefix": base_output_uri_prefix},
    }

    # Schema describing the custom training task.
    custom_task_definition = (
        "gs://google-cloud-aiplatform/schema/"
        "trainingjob/definition/custom_task_1.0.0.yaml"
    )

    # training_task_inputs: a single worker pool running the training container.
    training_worker_pool_spec = {
        "replicaCount": 1,
        "machineSpec": {"machineType": "n1-standard-8"},
        "containerSpec": {
            "imageUri": training_container_spec_image_uri,
            # AIP_MODEL_DIR is set by the service according to baseOutputDirectory.
            "args": ["--model-dir=$(AIP_MODEL_DIR)"],
        },
    }

    training_task_inputs = json_format.ParseDict(
        {
            "workerPoolSpecs": [training_worker_pool_spec],
            "baseOutputDirectory": {"outputUriPrefix": base_output_uri_prefix},
        },
        Value(),
    )

    # model_to_upload: serve the trained artifacts with TF Model Server.
    model = {
        "display_name": model_display_name,
        "container_spec": {
            "image_uri": model_container_spec_image_uri,
            "command": ["/bin/tensorflow_model_server"],
            "args": [
                "--model_name=$(AIP_MODEL)",
                "--model_base_path=$(AIP_STORAGE_URI)",
                "--rest_api_port=8080",
                "--port=8500",
                "--file_system_poll_wait_seconds=31540000",
            ],
        },
    }

    training_pipeline = {
        "display_name": display_name,
        "input_data_config": input_data_config,
        "training_task_definition": custom_task_definition,
        "training_task_inputs": training_task_inputs,
        "model_to_upload": model,
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_training_pipeline(
        parent=parent, training_pipeline=training_pipeline
    )
    print("response:", response)


# [END aiplatform_create_training_pipeline_custom_training_managed_dataset_sample]
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid
import pytest
import os

from google.cloud import aiplatform

import helpers

import create_training_pipeline_custom_training_managed_dataset_sample

API_ENDPOINT = "us-central1-aiplatform.googleapis.com"

PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
DISPLAY_NAME = f"temp_create_training_pipeline_custom_training_managed_dataset_test_{uuid.uuid4()}"
MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}"

DATASET_ID = "1084241610289446912"  # permanent_50_flowers_dataset
ANNOTATION_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/dataset/annotation/image_classification_1.0.0.yaml"

TRAINING_CONTAINER_SPEC_IMAGE_URI = "gcr.io/ucaip-test/custom-container-managed-dataset:latest"
MODEL_CONTAINER_SPEC_IMAGE_URI = "gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest"

BASE_OUTPUT_URI_PREFIX = "gs://ucaip-samples-us-central1/training_pipeline_output/custom_training_managed_dataset"


@pytest.fixture
def shared_state():
    state = {}
    yield state


@pytest.fixture
def pipeline_client():
    client_options = {"api_endpoint": API_ENDPOINT}
    pipeline_client = aiplatform.gapic.PipelineServiceClient(
        client_options=client_options
    )
    yield pipeline_client


@pytest.fixture
def model_client():
    client_options = {"api_endpoint": API_ENDPOINT}
    model_client = aiplatform.gapic.ModelServiceClient(client_options=client_options)
    yield model_client


@pytest.fixture(scope="function", autouse=True)
def teardown(shared_state, model_client, pipeline_client):
    yield
    # FIX: guard each key. If the test failed before the model was uploaded,
    # the old unconditional shared_state["model_name"] lookup raised KeyError,
    # masked the real failure, and skipped the pipeline cleanup below.
    if "model_name" in shared_state:
        model_client.delete_model(name=shared_state["model_name"])
    if "training_pipeline_name" in shared_state:
        pipeline_client.delete_training_pipeline(
            name=shared_state["training_pipeline_name"]
        )


def test_create_training_pipeline_custom_training_managed_dataset_sample(
    capsys, shared_state, pipeline_client
):
    create_training_pipeline_custom_training_managed_dataset_sample.create_training_pipeline_custom_training_managed_dataset_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model_display_name=MODEL_DISPLAY_NAME,
        dataset_id=DATASET_ID,
        annotation_schema_uri=ANNOTATION_SCHEMA_URI,
        training_container_spec_image_uri=TRAINING_CONTAINER_SPEC_IMAGE_URI,
        model_container_spec_image_uri=MODEL_CONTAINER_SPEC_IMAGE_URI,
        base_output_uri_prefix=BASE_OUTPUT_URI_PREFIX,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)

    # Poll until the pipeline succeeds because we want to test the model_upload step as well.
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
        expected_state="SUCCEEDED",
        timeout=1800,
        freq=20,
    )

    training_pipeline = pipeline_client.get_training_pipeline(
        name=shared_state["training_pipeline_name"]
    )

    # Check that the model indeed has been uploaded.
    assert training_pipeline.model_to_upload.name != ""

    shared_state["model_name"] = training_pipeline.model_to_upload.name
Co-authored-by: Yu-Han Liu --- ...ediction_job_text_classification_sample.py | 55 ++++++++++++ ...ion_job_text_classification_sample_test.py | 85 +++++++++++++++++++ ...ction_job_text_entity_extraction_sample.py | 55 ++++++++++++ ..._job_text_entity_extraction_sample_test.py | 85 +++++++++++++++++++ ...tion_job_text_sentiment_analysis_sample.py | 55 ++++++++++++ ...job_text_sentiment_analysis_sample_test.py | 85 +++++++++++++++++++ ...ta_labeling_job_image_segmentation_test.py | 1 + .../snippets/create_endpoint_sample_test.py | 3 +- ...r_tuning_job_python_package_sample_test.py | 1 + ...ate_training_pipeline_custom_job_sample.py | 2 +- samples/snippets/deploy_model_sample_test.py | 3 +- ...classification_single_label_sample_test.py | 2 + samples/snippets/upload_model_sample_test.py | 3 +- 13 files changed, 431 insertions(+), 4 deletions(-) create mode 100644 samples/snippets/create_batch_prediction_job_text_classification_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_text_classification_sample_test.py create mode 100644 samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py create mode 100644 samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py diff --git a/samples/snippets/create_batch_prediction_job_text_classification_sample.py b/samples/snippets/create_batch_prediction_job_text_classification_sample.py new file mode 100644 index 0000000000..a9a9ad6e67 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_text_classification_sample.py @@ -0,0 +1,55 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START aiplatform_create_batch_prediction_job_text_classification_sample]
from google.cloud import aiplatform
from google.protobuf.struct_pb2 import Value


def create_batch_prediction_job_text_classification_sample(
    project: str,
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Start a batch prediction job for an AutoML text classification model."""
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.JobServiceClient(
        client_options={"api_endpoint": api_endpoint}
    )

    # Read JSONL instances from GCS and write JSONL predictions back to GCS.
    job_spec = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        "model_parameters": Value(),
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }
    response = client.create_batch_prediction_job(
        parent=f"projects/{project}/locations/{location}",
        batch_prediction_job=job_spec,
    )
    print("response:", response)


# [END aiplatform_create_batch_prediction_job_text_classification_sample]
from uuid import uuid4
import pytest
import os

import helpers

import create_batch_prediction_job_text_classification_sample
import cancel_batch_prediction_job_sample
import delete_batch_prediction_job_sample

from google.cloud import aiplatform

PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
MODEL_ID = "3863595899074641920"  # Permanent restaurant rating model
DISPLAY_NAME = f"temp_create_batch_prediction_tcn_test_{uuid4()}"
GCS_SOURCE_URI = (
    "gs://ucaip-samples-test-output/inputs/batch_predict_TCN/tcn_inputs.jsonl"
)
GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/"


@pytest.fixture(scope="function")
def shared_state():

    shared_state = {}

    yield shared_state

    # FIX: if the test failed before the job was created, the old
    # unconditional lookup raised KeyError and masked the real failure.
    if "batch_prediction_job_name" not in shared_state:
        return

    assert "/" in shared_state["batch_prediction_job_name"]

    batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1]

    # Stop the batch prediction job
    cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job
    )

    job_client = aiplatform.gapic.JobServiceClient(
        client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
    )

    # Waiting for batch prediction job to be in CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
    )

    # Delete the batch prediction job
    delete_batch_prediction_job_sample.delete_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job
    )


# Creating AutoML Text Classification batch prediction job
def test_ucaip_generated_create_batch_prediction_tcn_sample(capsys, shared_state):

    model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"

    create_batch_prediction_job_text_classification_sample.create_batch_prediction_job_text_classification_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model=model_name,
        gcs_source_uri=GCS_SOURCE_URI,
        gcs_destination_output_uri_prefix=GCS_OUTPUT_URI,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created batch prediction job
    shared_state["batch_prediction_job_name"] = helpers.get_name(out)


# [START aiplatform_create_batch_prediction_job_text_entity_extraction_sample]
from google.cloud import aiplatform
from google.protobuf.struct_pb2 import Value


def create_batch_prediction_job_text_entity_extraction_sample(
    project: str,
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Start a batch prediction job for an AutoML text entity extraction model."""
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.JobServiceClient(client_options=client_options)

    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        "model_parameters": Value(),
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_batch_prediction_job(
        parent=parent, batch_prediction_job=batch_prediction_job
    )
    print("response:", response)


# [END aiplatform_create_batch_prediction_job_text_entity_extraction_sample]
from uuid import uuid4
import pytest
import os

import helpers

import create_batch_prediction_job_text_entity_extraction_sample
import cancel_batch_prediction_job_sample
import delete_batch_prediction_job_sample

from google.cloud import aiplatform

PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
MODEL_ID = "5216364637146054656"  # Permanent medical entity NL model
DISPLAY_NAME = f"temp_create_batch_prediction_ten_test_{uuid4()}"
GCS_SOURCE_URI = (
    "gs://ucaip-samples-test-output/inputs/batch_predict_TEN/ten_inputs.jsonl"
)
GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/"


@pytest.fixture(scope="function")
def shared_state():

    shared_state = {}

    yield shared_state

    # FIX: if the test failed before the job was created, the old
    # unconditional lookup raised KeyError and masked the real failure.
    if "batch_prediction_job_name" not in shared_state:
        return

    assert "/" in shared_state["batch_prediction_job_name"]

    batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1]

    # Stop the batch prediction job
    cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job
    )

    job_client = aiplatform.gapic.JobServiceClient(
        client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
    )

    # Waiting for batch prediction job to be in CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
    )

    # Delete the batch prediction job
    delete_batch_prediction_job_sample.delete_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job
    )


# Creating AutoML Text Entity Extraction batch prediction job
def test_ucaip_generated_create_batch_prediction_ten_sample(capsys, shared_state):

    model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"

    create_batch_prediction_job_text_entity_extraction_sample.create_batch_prediction_job_text_entity_extraction_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model=model_name,
        gcs_source_uri=GCS_SOURCE_URI,
        gcs_destination_output_uri_prefix=GCS_OUTPUT_URI,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created batch prediction job
    shared_state["batch_prediction_job_name"] = helpers.get_name(out)


# [START aiplatform_create_batch_prediction_job_text_sentiment_analysis_sample]
from google.cloud import aiplatform
from google.protobuf.struct_pb2 import Value


def create_batch_prediction_job_text_sentiment_analysis_sample(
    project: str,
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Start a batch prediction job for an AutoML text sentiment analysis model."""
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.JobServiceClient(client_options=client_options)

    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        "model_parameters": Value(),
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_batch_prediction_job(
        parent=parent, batch_prediction_job=batch_prediction_job
    )
    print("response:", response)


# [END aiplatform_create_batch_prediction_job_text_sentiment_analysis_sample]
from uuid import uuid4
import pytest
import os

import helpers

import create_batch_prediction_job_text_sentiment_analysis_sample
import cancel_batch_prediction_job_sample
import delete_batch_prediction_job_sample

from google.cloud import aiplatform

PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
MODEL_ID = "4792568875336073216"  # Permanent economic sentiment model
DISPLAY_NAME = f"temp_create_batch_prediction_tsn_test_{uuid4()}"
GCS_SOURCE_URI = (
    "gs://ucaip-samples-test-output/inputs/batch_predict_TSN/tsn_inputs.jsonl"
)
GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/"


@pytest.fixture(scope="function")
def shared_state():

    shared_state = {}

    yield shared_state

    # FIX: if the test failed before the job was created, the old
    # unconditional lookup raised KeyError and masked the real failure.
    if "batch_prediction_job_name" not in shared_state:
        return

    assert "/" in shared_state["batch_prediction_job_name"]

    batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1]

    # Stop the batch prediction job
    cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job
    )

    job_client = aiplatform.gapic.JobServiceClient(
        client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
    )

    # Waiting for batch prediction job to be in CANCELLED state
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
    )

    # Delete the batch prediction job
    delete_batch_prediction_job_sample.delete_batch_prediction_job_sample(
        project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job
    )


# Creating AutoML Text Sentiment Analysis batch prediction job
def test_ucaip_generated_create_batch_prediction_tsn_sample(capsys, shared_state):

    model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"

    create_batch_prediction_job_text_sentiment_analysis_sample.create_batch_prediction_job_text_sentiment_analysis_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model=model_name,
        gcs_source_uri=GCS_SOURCE_URI,
        gcs_destination_output_uri_prefix=GCS_OUTPUT_URI,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created batch prediction job
    shared_state["batch_prediction_job_name"] = helpers.get_name(out)
b/samples/snippets/create_training_pipeline_custom_job_sample.py index b8918f5b09..f9e22bc867 100644 --- a/samples/snippets/create_training_pipeline_custom_job_sample.py +++ b/samples/snippets/create_training_pipeline_custom_job_sample.py @@ -63,7 +63,7 @@ def create_training_pipeline_custom_job_sample( "training_task_inputs": training_task_inputs, "model_to_upload": { "display_name": model_display_name, - "container_spec": {"image_uri": image_uri,}, + "container_spec": {"image_uri": image_uri, }, }, } parent = f"projects/{project}/locations/{location}" diff --git a/samples/snippets/deploy_model_sample_test.py b/samples/snippets/deploy_model_sample_test.py index 2960a7f3d3..46f8c03f2e 100644 --- a/samples/snippets/deploy_model_sample_test.py +++ b/samples/snippets/deploy_model_sample_test.py @@ -13,7 +13,8 @@ # limitations under the License. from google.cloud import aiplatform -import deploy_model_sample, delete_endpoint_sample +import deploy_model_sample +import delete_endpoint_sample from uuid import uuid4 import pytest diff --git a/samples/snippets/import_data_text_classification_single_label_sample_test.py b/samples/snippets/import_data_text_classification_single_label_sample_test.py index 6b7dbdd195..1ea0afaab2 100644 --- a/samples/snippets/import_data_text_classification_single_label_sample_test.py +++ b/samples/snippets/import_data_text_classification_single_label_sample_test.py @@ -21,6 +21,8 @@ # Test to assert that the import data function was called. 
We assert that the function was called # rather than wait for this LRO to complete + + def test_ucaip_generated_import_data_text_classification_single_label_sample(): response = MagicMock() response.next_page_token = b"" diff --git a/samples/snippets/upload_model_sample_test.py b/samples/snippets/upload_model_sample_test.py index 3814ddd4a7..7cc9635de6 100644 --- a/samples/snippets/upload_model_sample_test.py +++ b/samples/snippets/upload_model_sample_test.py @@ -18,7 +18,8 @@ import helpers -import upload_model_sample, delete_model_sample +import upload_model_sample +import delete_model_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") IMAGE_URI = "gcr.io/cloud-ml-service-public/cloud-ml-online-prediction-model-server-cpu:v1_15py3cmle_op_images_20200229_0210_RC00" From 19dc31a7e63ec112e9d0dc72e22db04910137d07 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Thu, 26 Nov 2020 08:59:53 -0800 Subject: [PATCH 10/34] docs: update readme (#81) --- README.rst | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/README.rst b/README.rst index 3db9fc5022..209b577ead 100644 --- a/README.rst +++ b/README.rst @@ -4,13 +4,7 @@ Python Client for Cloud AI Platform |beta| |pypi| |versions| -:Warning: This library is a pre-release product and is subject to breaking changes. - -`Cloud AI Platform`_: Cloud AI Platform is a suite of machine learning tools that enables - developers to train high-quality models specific to their business needs. - It offers both novices and experts the best workbench for machine learning - development by leveraging Google's state-of-the-art transfer learning and - Neural Architecture Search technology. +`Cloud AI Platform`_: Google Cloud AI Platform is an integrated suite of machine learning tools and services for building and using ML models with AutoML or custom code. It offers both novices and experts the best workbench for the entire machine learning development lifecycle. 
- `Client Library Documentation`_ - `Product Documentation`_ @@ -21,9 +15,9 @@ Python Client for Cloud AI Platform :target: https://pypi.org/project/google-cloud-aiplatform/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-aiplatform.svg :target: https://pypi.org/project/google-cloud-aiplatform/ -.. _Cloud AI Platform: https://cloud.google.com/ai-platform/docs +.. _Cloud AI Platform: https://cloud.google.com/ai-platform-unified/docs .. _Client Library Documentation: https://googleapis.dev/python/aiplatform/latest -.. _Product Documentation: https://cloud.google.com/ai-platform/docs +.. _Product Documentation: https://cloud.google.com/ai-platform-unified/docs Quick Start ----------- @@ -85,5 +79,5 @@ Next Steps - View this `README`_ to see the full list of Cloud APIs that we cover. -.. _Cloud AI Platform API Product documentation: https://cloud.google.com/ai-platform/docs +.. _Cloud AI Platform API Product documentation: https://cloud.google.com/ai-platform-unified/docs .. 
_README: https://github.com/googleapis/google-cloud-python/blob/master/README.rst \ No newline at end of file From 860d12e3cbcf2d300559d0acbb3058e2d66e45e2 Mon Sep 17 00:00:00 2001 From: Morgan Du Date: Thu, 26 Nov 2020 09:56:21 -0800 Subject: [PATCH 11/34] test: var (#89) Co-authored-by: Yu-Han Liu --- ...ction_job_video_action_recognition_test.py | 35 +++++++----- ..._pipeline_video_action_recognition_test.py | 55 ++++++++----------- ...ort_model_video_action_recognition_test.py | 9 ++- ...port_data_video_action_recognition_test.py | 4 +- 4 files changed, 54 insertions(+), 49 deletions(-) diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py index 7a3e07a082..d489cb421d 100644 --- a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py @@ -25,11 +25,14 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" MODEL_ID = "3530998029718913024" # permanent_swim_run_videos_action_recognition_model -DISPLAY_NAME = f"temp_create_batch_prediction_job_video_action_recognition_test_{uuid.uuid4()}" +DISPLAY_NAME = ( + f"temp_create_batch_prediction_job_video_action_recognition_test_{uuid.uuid4()}" +) GCS_SOURCE_URI = "gs://automl-video-demo-data/ucaip-var/swimrun_bp.jsonl" GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" API_ENDPOINT = "us-central1-aiplatform.googleapis.com" + @pytest.fixture def shared_state(): state = {} @@ -39,14 +42,27 @@ def shared_state(): @pytest.fixture def job_client(): client_options = {"api_endpoint": API_ENDPOINT} - job_client = aiplatform.gapic.JobServiceClient( - client_options=client_options) + job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) yield job_client @pytest.fixture(scope="function", autouse=True) def teardown(shared_state, job_client): yield + + # Stop 
the batch prediction job + # Delete the batch prediction job + job_client.cancel_batch_prediction_job( + name=shared_state["batch_prediction_job_name"] + ) + + # Waiting for batch prediction job to be in CANCELLED state + helpers.wait_for_job_state( + get_job_method=job_client.get_batch_prediction_job, + name=shared_state["batch_prediction_job_name"], + ) + + # Delete the batch prediction job job_client.delete_batch_prediction_job( name=shared_state["batch_prediction_job_name"] ) @@ -57,12 +73,12 @@ def test_create_batch_prediction_job_video_action_recognition_sample( capsys, shared_state, job_client ): - model = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}" + model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}" create_batch_prediction_job_video_action_recognition_sample.create_batch_prediction_job_video_action_recognition_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, - model=model, + model=model_name, gcs_source_uri=GCS_SOURCE_URI, gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, ) @@ -71,12 +87,3 @@ def test_create_batch_prediction_job_video_action_recognition_sample( # Save resource name of the newly created batch prediction job shared_state["batch_prediction_job_name"] = helpers.get_name(out) - - # Waiting for batch prediction job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - expected_state="SUCCEEDED", - timeout=600, - freq=20, - ) diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_test.py b/samples/snippets/create_training_pipeline_video_action_recognition_test.py index b443746d67..ab653a49e1 100644 --- a/samples/snippets/create_training_pipeline_video_action_recognition_test.py +++ b/samples/snippets/create_training_pipeline_video_action_recognition_test.py @@ -24,12 +24,17 @@ LOCATION = "us-central1" PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") 
-DATASET_ID = "6881957627459272704" # permanent_swim_run_videos_action_recognition_dataset -DISPLAY_NAME = f"temp_create_training_pipeline_video_action_recognition_test_{uuid.uuid4()}" +DATASET_ID = ( + "6881957627459272704" # permanent_swim_run_videos_action_recognition_dataset +) +DISPLAY_NAME = ( + f"temp_create_training_pipeline_video_action_recognition_test_{uuid.uuid4()}" +) MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}" MODEL_TYPE = "CLOUD" API_ENDPOINT = "us-central1-aiplatform.googleapis.com" + @pytest.fixture def shared_state(): state = {} @@ -44,27 +49,32 @@ def pipeline_client(): ) yield pipeline_client +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, pipeline_client): + yield -@pytest.fixture -def model_client(): - client_options = {"api_endpoint": API_ENDPOINT} - model_client = aiplatform.gapic.ModelServiceClient( - client_options=client_options) - yield model_client + # Stop the training pipeline + pipeline_client.cancel_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + # Waiting for training pipeline to be in CANCELLED state + helpers.wait_for_job_state( + get_job_method=pipeline_client.get_training_pipeline, + name=shared_state["training_pipeline_name"], + ) -@pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, model_client, pipeline_client): - yield - model_client.delete_model(name=shared_state["model_name"]) + # Delete the training pipeline pipeline_client.delete_training_pipeline( name=shared_state["training_pipeline_name"] ) + + # Training AutoML Vision Model def test_create_training_pipeline_video_action_recognition_sample( - capsys, shared_state, pipeline_client + capsys, shared_state ): create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample( project=PROJECT_ID, @@ -75,26 +85,7 @@ def test_create_training_pipeline_video_action_recognition_sample( ) out, _ = capsys.readouterr() - assert "response:" in out # 
Save resource name of the newly created training pipeline shared_state["training_pipeline_name"] = helpers.get_name(out) - - # Poll until the pipeline succeeds because we want to test the model_upload step as well. - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - expected_state="SUCCEEDED", - timeout=5000, - freq=20, - ) - - training_pipeline = pipeline_client.get_training_pipeline( - name=shared_state["training_pipeline_name"] - ) - - # Check that the model indeed has been uploaded. - assert training_pipeline.model_to_upload.name != "" - - shared_state["model_name"] = training_pipeline.model_to_upload.name diff --git a/samples/snippets/export_model_video_action_recognition_test.py b/samples/snippets/export_model_video_action_recognition_test.py index 543be7dc47..fabb228147 100644 --- a/samples/snippets/export_model_video_action_recognition_test.py +++ b/samples/snippets/export_model_video_action_recognition_test.py @@ -19,10 +19,15 @@ from google.cloud import storage PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "3422489426196955136" # permanent_swim_run_videos_action_recognition_edge_model -GCS_URI = "gs://ucaip-samples-test-output/tmp/export_model_video_action_recognition_sample" +MODEL_ID = ( + "3422489426196955136" # permanent_swim_run_videos_action_recognition_edge_model +) +GCS_URI = ( + "gs://ucaip-samples-test-output/tmp/export_model_video_action_recognition_sample" +) EXPORT_FORMAT = "tf-saved-model" + @pytest.fixture(scope="function", autouse=True) def teardown(): yield diff --git a/samples/snippets/import_data_video_action_recognition_test.py b/samples/snippets/import_data_video_action_recognition_test.py index e62dc1f49f..cacb56de70 100644 --- a/samples/snippets/import_data_video_action_recognition_test.py +++ b/samples/snippets/import_data_video_action_recognition_test.py @@ -25,7 +25,9 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") 
LOCATION = "us-central1" GCS_SOURCE = "gs://automl-video-demo-data/ucaip-var/swimrun.jsonl" -METADATA_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml" +METADATA_SCHEMA_URI = ( + "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml" +) API_ENDPOINT = "us-central1-aiplatform.googleapis.com" DISPLAY_NAME = f"temp_import_data_video_action_recognition_test_{uuid.uuid4()}" From 77956b2b3d951599d28a990c807e517429e6bd59 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Mon, 30 Nov 2020 14:07:27 -0800 Subject: [PATCH 12/34] chore: sample tests lint (#90) * chore: sample tests lint * lint * lnt * lint --- .../cancel_training_pipeline_sample_test.py | 12 ++++---- ...ion_job_text_classification_sample_test.py | 11 ++++---- ..._job_text_entity_extraction_sample_test.py | 11 ++++---- ...job_text_sentiment_analysis_sample_test.py | 11 ++++---- ...ction_job_video_action_recognition_test.py | 9 +++--- ...on_job_video_classification_sample_test.py | 11 ++++---- ...n_job_video_object_tracking_sample_test.py | 11 ++++---- .../snippets/create_custom_job_sample_test.py | 9 +++--- ..._data_labeling_job_active_learning_test.py | 6 ++-- ...ta_labeling_job_image_segmentation_test.py | 6 ++-- ...te_data_labeling_job_images_sample_test.py | 8 ++---- .../create_data_labeling_job_sample_test.py | 8 ++---- ..._data_labeling_job_specialist_pool_test.py | 6 ++-- ...ate_data_labeling_job_video_sample_test.py | 8 ++---- .../create_dataset_image_sample_test.py | 6 ++-- .../snippets/create_dataset_sample_test.py | 6 ++-- ...te_dataset_tabular_bigquery_sample_test.py | 6 ++-- .../create_dataset_tabular_gcs_sample_test.py | 6 ++-- .../create_dataset_video_sample_test.py | 6 ++-- .../snippets/create_endpoint_sample_test.py | 4 +-- ...r_tuning_job_python_package_sample_test.py | 9 +++--- ...e_hyperparameter_tuning_job_sample_test.py | 9 +++--- ...ate_training_pipeline_custom_job_sample.py | 2 +- ...raining_pipeline_custom_job_sample_test.py | 11 ++++---- 
..._custom_training_managed_dataset_sample.py | 28 ++++++------------- ...om_training_managed_dataset_sample_test.py | 7 ++--- ...peline_image_classification_sample_test.py | 11 ++++---- ...line_image_object_detection_sample_test.py | 11 ++++---- .../create_training_pipeline_sample_test.py | 11 ++++---- ...line_tabular_classification_sample_test.py | 11 ++++---- ...pipeline_tabular_regression_sample_test.py | 11 ++++---- ...line_text_entity_extraction_sample_test.py | 11 ++++---- ...ine_text_sentiment_analysis_sample_test.py | 11 ++++---- ..._pipeline_video_action_recognition_test.py | 12 ++++---- ...peline_video_classification_sample_test.py | 11 ++++---- ...eline_video_object_tracking_sample_test.py | 11 ++++---- ..._model_custom_trained_model_sample_test.py | 8 +++--- samples/snippets/deploy_model_sample_test.py | 10 +++---- ...odel_tabular_classification_sample_test.py | 6 ++-- ...ort_model_video_action_recognition_test.py | 5 ++-- .../snippets/get_custom_job_sample_test.py | 2 +- ...t_hyperparameter_tuning_job_sample_test.py | 2 +- .../get_model_evaluation_sample_test.py | 2 +- .../get_model_evaluation_slice_sample_test.py | 2 +- ...tion_tabular_classification_sample_test.py | 2 +- ...aluation_tabular_regression_sample_test.py | 2 +- ...valuation_video_action_recognition_test.py | 2 +- ...uation_video_classification_sample_test.py | 2 +- ...ation_video_object_tracking_sample_test.py | 2 +- samples/snippets/get_model_sample_test.py | 2 +- .../get_training_pipeline_sample_test.py | 2 +- samples/snippets/helpers.py | 2 +- ...classification_single_label_sample_test.py | 5 ++-- ...data_text_entity_extraction_sample_test.py | 6 ++-- ...ata_text_sentiment_analysis_sample_test.py | 6 ++-- ...port_data_video_action_recognition_test.py | 7 ++--- ...t_data_video_classification_sample_test.py | 7 ++--- ..._data_video_object_tracking_sample_test.py | 7 ++--- ...ist_model_evaluation_slices_sample_test.py | 2 +- .../predict_custom_trained_model_sample.py | 3 +- 
...redict_custom_trained_model_sample_test.py | 5 ++-- .../predict_image_classification_sample.py | 3 +- ...redict_image_classification_sample_test.py | 4 +-- .../predict_image_object_detection_sample.py | 3 +- ...dict_image_object_detection_sample_test.py | 3 +- samples/snippets/predict_sample.py | 3 +- .../predict_tabular_classification_sample.py | 3 +- ...dict_tabular_classification_sample_test.py | 2 +- .../predict_tabular_regression_sample.py | 3 +- .../predict_tabular_regression_sample_test.py | 2 +- ...classification_single_label_sample_test.py | 2 +- ...dict_text_entity_extraction_sample_test.py | 2 +- ...ict_text_sentiment_analysis_sample_test.py | 2 +- samples/snippets/upload_model_sample_test.py | 6 ++-- 74 files changed, 212 insertions(+), 265 deletions(-) diff --git a/samples/snippets/cancel_training_pipeline_sample_test.py b/samples/snippets/cancel_training_pipeline_sample_test.py index 9ffb601e42..e16b384c22 100644 --- a/samples/snippets/cancel_training_pipeline_sample_test.py +++ b/samples/snippets/cancel_training_pipeline_sample_test.py @@ -12,18 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_sample import cancel_training_pipeline_sample +import create_training_pipeline_sample import delete_training_pipeline_sample -import get_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py index 86d73b5ac2..003b858416 100644 --- a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_batch_prediction_job_text_classification_sample import cancel_batch_prediction_job_sample +import create_batch_prediction_job_text_classification_sample import delete_batch_prediction_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py index c86395d623..069730a9b5 100644 --- a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_batch_prediction_job_text_entity_extraction_sample import cancel_batch_prediction_job_sample +import create_batch_prediction_job_text_entity_extraction_sample import delete_batch_prediction_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py index d3fc196707..192312086f 100644 --- a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_batch_prediction_job_text_sentiment_analysis_sample import cancel_batch_prediction_job_sample +import create_batch_prediction_job_text_sentiment_analysis_sample import delete_batch_prediction_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py index d489cb421d..269410ac17 100644 --- a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import uuid -import pytest import os +import uuid -import helpers +from google.cloud import aiplatform +import pytest import create_batch_prediction_job_video_action_recognition_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py b/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py index 41752dc12b..f9344ab6ec 100644 --- a/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_batch_prediction_job_video_classification_sample import cancel_batch_prediction_job_sample +import create_batch_prediction_job_video_classification_sample import delete_batch_prediction_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py index 6ac8660193..46ace425d6 100644 --- a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_batch_prediction_job_video_object_tracking_sample import cancel_batch_prediction_job_sample +import create_batch_prediction_job_video_object_tracking_sample import delete_batch_prediction_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/create_custom_job_sample_test.py b/samples/snippets/create_custom_job_sample_test.py index affc2895f0..2b699b2061 100644 --- a/samples/snippets/create_custom_job_sample_test.py +++ b/samples/snippets/create_custom_job_sample_test.py @@ -13,17 +13,16 @@ # limitations under the License. 
-import pytest -import uuid import os +import uuid -import helpers +from google.cloud import aiplatform +import pytest import cancel_custom_job_sample import create_custom_job_sample import delete_custom_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") CONTAINER_IMAGE_URI = "gcr.io/ucaip-test/ucaip-training-test:latest" diff --git a/samples/snippets/create_data_labeling_job_active_learning_test.py b/samples/snippets/create_data_labeling_job_active_learning_test.py index 38a7c0c1c0..fff2eff6f4 100644 --- a/samples/snippets/create_data_labeling_job_active_learning_test.py +++ b/samples/snippets/create_data_labeling_job_active_learning_test.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os import uuid -from google.cloud import aiplatform -import helpers +from google.cloud import aiplatform +import pytest import create_data_labeling_job_active_learning_sample +import helpers API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/create_data_labeling_job_image_segmentation_test.py b/samples/snippets/create_data_labeling_job_image_segmentation_test.py index 3e78657484..5ed031e11b 100644 --- a/samples/snippets/create_data_labeling_job_image_segmentation_test.py +++ b/samples/snippets/create_data_labeling_job_image_segmentation_test.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os import uuid -from google.cloud import aiplatform -import helpers +from google.cloud import aiplatform +import pytest import create_data_labeling_job_image_segmentation_sample +import helpers API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/create_data_labeling_job_images_sample_test.py b/samples/snippets/create_data_labeling_job_images_sample_test.py index 8d99132f18..ae5bdce280 100644 --- a/samples/snippets/create_data_labeling_job_images_sample_test.py +++ b/samples/snippets/create_data_labeling_job_images_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os from uuid import uuid4 -from google.cloud import aiplatform -import helpers +from google.cloud import aiplatform +import pytest import create_data_labeling_job_images_sample -import cancel_data_labeling_job_sample -import delete_data_labeling_job_sample +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") diff --git a/samples/snippets/create_data_labeling_job_sample_test.py b/samples/snippets/create_data_labeling_job_sample_test.py index dc9a0baacb..220628d8ba 100644 --- a/samples/snippets/create_data_labeling_job_sample_test.py +++ b/samples/snippets/create_data_labeling_job_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os from uuid import uuid4 -from google.cloud import aiplatform -import helpers +from google.cloud import aiplatform +import pytest import create_data_labeling_job_sample -import cancel_data_labeling_job_sample -import delete_data_labeling_job_sample +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") diff --git a/samples/snippets/create_data_labeling_job_specialist_pool_test.py b/samples/snippets/create_data_labeling_job_specialist_pool_test.py index 8936fa6776..afb49613fb 100644 --- a/samples/snippets/create_data_labeling_job_specialist_pool_test.py +++ b/samples/snippets/create_data_labeling_job_specialist_pool_test.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os import uuid -from google.cloud import aiplatform -import helpers +from google.cloud import aiplatform +import pytest import create_data_labeling_job_specialist_pool_sample +import helpers API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/create_data_labeling_job_video_sample_test.py b/samples/snippets/create_data_labeling_job_video_sample_test.py index 74b20147f9..6d782e4887 100644 --- a/samples/snippets/create_data_labeling_job_video_sample_test.py +++ b/samples/snippets/create_data_labeling_job_video_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os from uuid import uuid4 -from google.cloud import aiplatform -import helpers +from google.cloud import aiplatform +import pytest import create_data_labeling_job_video_sample -import cancel_data_labeling_job_sample -import delete_data_labeling_job_sample +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") diff --git a/samples/snippets/create_dataset_image_sample_test.py b/samples/snippets/create_dataset_image_sample_test.py index ea2f2607ca..3d7664a14a 100644 --- a/samples/snippets/create_dataset_image_sample_test.py +++ b/samples/snippets/create_dataset_image_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os from uuid import uuid4 import pytest -import os - -import helpers import create_dataset_image_sample import delete_dataset_sample - +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/create_dataset_sample_test.py b/samples/snippets/create_dataset_sample_test.py index 3508092e8c..d0a674f101 100644 --- a/samples/snippets/create_dataset_sample_test.py +++ b/samples/snippets/create_dataset_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os from uuid import uuid4 import pytest -import os - -import helpers import create_dataset_sample import delete_dataset_sample - +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") IMAGE_METADATA_SCHEMA_URI = ( diff --git a/samples/snippets/create_dataset_tabular_bigquery_sample_test.py b/samples/snippets/create_dataset_tabular_bigquery_sample_test.py index a582760cfe..53f03de97f 100644 --- a/samples/snippets/create_dataset_tabular_bigquery_sample_test.py +++ b/samples/snippets/create_dataset_tabular_bigquery_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os from uuid import uuid4 import pytest -import os - -import helpers import create_dataset_tabular_bigquery_sample import delete_dataset_sample - +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") BIGQUERY_URI = "bq://ucaip-sample-tests.table_test.all_bq_types" diff --git a/samples/snippets/create_dataset_tabular_gcs_sample_test.py b/samples/snippets/create_dataset_tabular_gcs_sample_test.py index 3206a56e97..98e21c881e 100644 --- a/samples/snippets/create_dataset_tabular_gcs_sample_test.py +++ b/samples/snippets/create_dataset_tabular_gcs_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os from uuid import uuid4 import pytest -import os - -import helpers import create_dataset_tabular_gcs_sample import delete_dataset_sample - +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") GCS_URI = "gs://ucaip-sample-resources/iris_1000.csv" diff --git a/samples/snippets/create_dataset_video_sample_test.py b/samples/snippets/create_dataset_video_sample_test.py index 844a25c204..1e6f85b6bd 100644 --- a/samples/snippets/create_dataset_video_sample_test.py +++ b/samples/snippets/create_dataset_video_sample_test.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os from uuid import uuid4 import pytest -import os - -import helpers import create_dataset_video_sample import delete_dataset_sample - +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") VIDEO_METADATA_SCHEMA_URI = ( diff --git a/samples/snippets/create_endpoint_sample_test.py b/samples/snippets/create_endpoint_sample_test.py index cca027cdfc..2613a4afd0 100644 --- a/samples/snippets/create_endpoint_sample_test.py +++ b/samples/snippets/create_endpoint_sample_test.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os from uuid import uuid4 -import helpers +import pytest import create_endpoint_sample import delete_endpoint_sample +import helpers DISPLAY_NAME = f"temp_create_endpoint_test_{uuid4()}" PROJECT = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py index 6ec111bcac..ab8e975fcd 100644 --- a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py +++ b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import uuid -import pytest import os +import uuid -import helpers +from google.cloud import aiplatform +import pytest import create_hyperparameter_tuning_job_python_package_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/create_hyperparameter_tuning_job_sample_test.py b/samples/snippets/create_hyperparameter_tuning_job_sample_test.py index 21b16121fb..78799d7554 100644 --- a/samples/snippets/create_hyperparameter_tuning_job_sample_test.py +++ b/samples/snippets/create_hyperparameter_tuning_job_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import uuid -import pytest import os +import uuid -import helpers +from google.cloud import aiplatform +import pytest import cancel_hyperparameter_tuning_job_sample import create_hyperparameter_tuning_job_sample import delete_hyperparameter_tuning_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") CONTAINER_IMAGE_URI = "gcr.io/ucaip-test/ucaip-training-test:latest" diff --git a/samples/snippets/create_training_pipeline_custom_job_sample.py b/samples/snippets/create_training_pipeline_custom_job_sample.py index f9e22bc867..e12a98b9ea 100644 --- a/samples/snippets/create_training_pipeline_custom_job_sample.py +++ b/samples/snippets/create_training_pipeline_custom_job_sample.py @@ -63,7 +63,7 @@ def create_training_pipeline_custom_job_sample( "training_task_inputs": training_task_inputs, "model_to_upload": { "display_name": model_display_name, - "container_spec": {"image_uri": image_uri, }, + "container_spec": {"image_uri": image_uri}, }, } parent = f"projects/{project}/locations/{location}" diff --git a/samples/snippets/create_training_pipeline_custom_job_sample_test.py b/samples/snippets/create_training_pipeline_custom_job_sample_test.py index fc593a5d44..1d3655c6f3 100644 --- a/samples/snippets/create_training_pipeline_custom_job_sample_test.py +++ b/samples/snippets/create_training_pipeline_custom_job_sample_test.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest import create_training_pipeline_custom_job_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DISPLAY_NAME = f"temp_create_training_pipeline_custom_job_test_{uuid4()}" @@ -44,7 +43,7 @@ def pipeline_client(): def teardown(shared_state, pipeline_client): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] + shared_state["training_pipeline_name"].split("/")[-1] pipeline_client.cancel_training_pipeline( name=shared_state["training_pipeline_name"] diff --git a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py index 3638b53abc..b8966697a1 100644 --- a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py +++ b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py @@ -32,10 +32,8 @@ def create_training_pipeline_custom_training_managed_dataset_sample( ): client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. - # This client only needs to be created once, and can be reused for - # multiple requests. - client = aiplatform.gapic.PipelineServiceClient( - client_options=client_options) + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) # input_data_config input_data_config = { @@ -45,14 +43,13 @@ def create_training_pipeline_custom_training_managed_dataset_sample( } # training_task_definition - custom_task_definition = "gs://google-cloud-aiplatform/schema/" \ - "trainingjob/definition/custom_task_1.0.0.yaml" + custom_task_definition = "gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml" # training_task_inputs training_container_spec = { "imageUri": training_container_spec_image_uri, # AIP_MODEL_DIR is set by the service according to baseOutputDirectory. - "args": ["--model-dir=$(AIP_MODEL_DIR)",], + "args": ["--model-dir=$(AIP_MODEL_DIR)"], } training_worker_pool_spec = { @@ -66,25 +63,16 @@ def create_training_pipeline_custom_training_managed_dataset_sample( "baseOutputDirectory": {"outputUriPrefix": base_output_uri_prefix}, } - training_task_inputs = json_format.ParseDict( - training_task_inputs_dict, Value()) + training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) # model_to_upload model_container_spec = { "image_uri": model_container_spec_image_uri, - "command": ["/bin/tensorflow_model_server"], - "args": [ - "--model_name=$(AIP_MODEL)", - "--model_base_path=$(AIP_STORAGE_URI)", - "--rest_api_port=8080", - "--port=8500", - "--file_system_poll_wait_seconds=31540000" - ], + "command": [], + "args": [], } - model = { - "display_name": model_display_name, - "container_spec": model_container_spec} + model = {"display_name": model_display_name, "container_spec": model_container_spec} training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py index 85c6f22734..6d75cf249c 100644 --- a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py +++ 
b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import uuid -import pytest import os +import uuid from google.cloud import aiplatform - -import helpers +import pytest import create_training_pipeline_custom_training_managed_dataset_sample +import helpers API_ENDPOINT = "us-central1-aiplatform.googleapis.com" diff --git a/samples/snippets/create_training_pipeline_image_classification_sample_test.py b/samples/snippets/create_training_pipeline_image_classification_sample_test.py index 2c4ecab9b1..89e8804a92 100644 --- a/samples/snippets/create_training_pipeline_image_classification_sample_test.py +++ b/samples/snippets/create_training_pipeline_image_classification_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_image_classification_sample import cancel_training_pipeline_sample +import create_training_pipeline_image_classification_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "1084241610289446912" # Permanent 50 Flowers Dataset diff --git a/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py b/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py index 0e76ae02f1..24f26f33e3 100644 --- a/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py +++ b/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_image_object_detection_sample import cancel_training_pipeline_sample +import create_training_pipeline_image_object_detection_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "3555732643297361920" # permanent_salad_object_detection_dataset diff --git a/samples/snippets/create_training_pipeline_sample_test.py b/samples/snippets/create_training_pipeline_sample_test.py index 1903ff4ce3..4677004e4e 100644 --- a/samples/snippets/create_training_pipeline_sample_test.py +++ b/samples/snippets/create_training_pipeline_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_sample import cancel_training_pipeline_sample +import create_training_pipeline_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "1084241610289446912" # Permanent 50 Flowers Dataset diff --git a/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py b/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py index 66cedbbd09..fe61a82188 100644 --- a/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py +++ b/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_tabular_classification_sample import cancel_training_pipeline_sample +import create_training_pipeline_tabular_classification_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "2438839935709478912" # iris 1000 diff --git a/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py b/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py index 1b43876902..ab126f8df9 100644 --- a/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py +++ b/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_tabular_regression_sample import cancel_training_pipeline_sample +import create_training_pipeline_tabular_regression_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "3019804287640272896" # bq all diff --git a/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py b/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py index a904d3d114..4f54c0fc51 100644 --- a/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py +++ b/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_text_entity_extraction_sample import cancel_training_pipeline_sample +import create_training_pipeline_text_entity_extraction_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "6203215905493614592" # Permanent text entity extraction dataset diff --git a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py index cb86ce3c24..ecaa041cc6 100644 --- a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_text_sentiment_analysis_sample import cancel_training_pipeline_sample +import create_training_pipeline_text_sentiment_analysis_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "5148529167758786560" # Permanent text sentiment analysis dataset diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_test.py b/samples/snippets/create_training_pipeline_video_action_recognition_test.py index ab653a49e1..cd73df7286 100644 --- a/samples/snippets/create_training_pipeline_video_action_recognition_test.py +++ b/samples/snippets/create_training_pipeline_video_action_recognition_test.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import uuid -import pytest import os +import uuid -import helpers +from google.cloud import aiplatform +import pytest import create_training_pipeline_video_action_recognition_sample - -from google.cloud import aiplatform +import helpers LOCATION = "us-central1" PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -49,6 +48,7 @@ def pipeline_client(): ) yield pipeline_client + @pytest.fixture(scope="function", autouse=True) def teardown(shared_state, pipeline_client): yield @@ -70,8 +70,6 @@ def teardown(shared_state, pipeline_client): ) - - # Training AutoML Vision Model def test_create_training_pipeline_video_action_recognition_sample( capsys, shared_state diff --git a/samples/snippets/create_training_pipeline_video_classification_sample_test.py b/samples/snippets/create_training_pipeline_video_classification_sample_test.py index 630a3830b6..cc0a826967 100644 --- a/samples/snippets/create_training_pipeline_video_classification_sample_test.py +++ b/samples/snippets/create_training_pipeline_video_classification_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_video_classification_sample import cancel_training_pipeline_sample +import create_training_pipeline_video_classification_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "3757409464110546944" # Permanent 5 class sports dataset diff --git a/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py b/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py index 1c1eacb4ef..fd1788ae7b 100644 --- a/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py +++ b/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from uuid import uuid4 -import pytest import os +from uuid import uuid4 -import helpers +from google.cloud import aiplatform +import pytest -import create_training_pipeline_video_object_tracking_sample import cancel_training_pipeline_sample +import create_training_pipeline_video_object_tracking_sample import delete_training_pipeline_sample - -from google.cloud import aiplatform +import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") DATASET_ID = "1138566280794603520" # Permanent 40 horses dataset diff --git a/samples/snippets/deploy_model_custom_trained_model_sample_test.py b/samples/snippets/deploy_model_custom_trained_model_sample_test.py index 49f096fdb6..43cf53cdf7 100644 --- a/samples/snippets/deploy_model_custom_trained_model_sample_test.py +++ b/samples/snippets/deploy_model_custom_trained_model_sample_test.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from google.cloud import aiplatform -import deploy_model_custom_trained_model_sample - +import os from uuid import uuid4 + +from google.cloud import aiplatform import pytest -import os +import deploy_model_custom_trained_model_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/deploy_model_sample_test.py b/samples/snippets/deploy_model_sample_test.py index 46f8c03f2e..a3cc96b879 100644 --- a/samples/snippets/deploy_model_sample_test.py +++ b/samples/snippets/deploy_model_sample_test.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from google.cloud import aiplatform -import deploy_model_sample -import delete_endpoint_sample - +import os from uuid import uuid4 + +from google.cloud import aiplatform import pytest -import os +import delete_endpoint_sample +import deploy_model_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/export_model_tabular_classification_sample_test.py b/samples/snippets/export_model_tabular_classification_sample_test.py index b0b0c7e011..90b885ba2d 100644 --- a/samples/snippets/export_model_tabular_classification_sample_test.py +++ b/samples/snippets/export_model_tabular_classification_sample_test.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os - from uuid import uuid4 -import export_model_tabular_classification_sample from google.cloud import storage +import pytest + +import export_model_tabular_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") MODEL_ID = "5359002081594179584" # iris 1000 diff --git a/samples/snippets/export_model_video_action_recognition_test.py b/samples/snippets/export_model_video_action_recognition_test.py index fabb228147..ff7341114d 100644 --- a/samples/snippets/export_model_video_action_recognition_test.py +++ b/samples/snippets/export_model_video_action_recognition_test.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os -import export_model_video_action_recognition_sample from google.cloud import storage +import pytest + +import export_model_video_action_recognition_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") MODEL_ID = ( diff --git a/samples/snippets/get_custom_job_sample_test.py b/samples/snippets/get_custom_job_sample_test.py index 5e11f2d8da..f66aeb9020 100644 --- a/samples/snippets/get_custom_job_sample_test.py +++ b/samples/snippets/get_custom_job_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import get_custom_job_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_hyperparameter_tuning_job_sample_test.py b/samples/snippets/get_hyperparameter_tuning_job_sample_test.py index 839d056b43..06ac1627e7 100644 --- a/samples/snippets/get_hyperparameter_tuning_job_sample_test.py +++ b/samples/snippets/get_hyperparameter_tuning_job_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os + import get_hyperparameter_tuning_job_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_sample_test.py b/samples/snippets/get_model_evaluation_sample_test.py index bba69215ec..26490f4b07 100644 --- a/samples/snippets/get_model_evaluation_sample_test.py +++ b/samples/snippets/get_model_evaluation_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import get_model_evaluation_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_slice_sample_test.py b/samples/snippets/get_model_evaluation_slice_sample_test.py index f8ed759f6d..9ce73344e8 100644 --- a/samples/snippets/get_model_evaluation_slice_sample_test.py +++ b/samples/snippets/get_model_evaluation_slice_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import get_model_evaluation_slice_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py b/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py index dcfe5da069..e5e8cbc27a 100644 --- a/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py +++ b/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os + import get_model_evaluation_tabular_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py b/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py index a0d97b0a77..97f539d3f8 100644 --- a/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py +++ b/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import get_model_evaluation_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_video_action_recognition_test.py b/samples/snippets/get_model_evaluation_video_action_recognition_test.py index 973987e086..c7218141a6 100644 --- a/samples/snippets/get_model_evaluation_video_action_recognition_test.py +++ b/samples/snippets/get_model_evaluation_video_action_recognition_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import get_model_evaluation_video_object_tracking_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_video_classification_sample_test.py b/samples/snippets/get_model_evaluation_video_classification_sample_test.py index cfb8a84f29..f1de95418c 100644 --- a/samples/snippets/get_model_evaluation_video_classification_sample_test.py +++ b/samples/snippets/get_model_evaluation_video_classification_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os + import get_model_evaluation_video_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py b/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py index 4777889ed7..b9d972029f 100644 --- a/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py +++ b/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import get_model_evaluation_video_object_tracking_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_sample_test.py b/samples/snippets/get_model_sample_test.py index baf045205e..a6990d38b5 100644 --- a/samples/snippets/get_model_sample_test.py +++ b/samples/snippets/get_model_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import get_model_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_training_pipeline_sample_test.py b/samples/snippets/get_training_pipeline_sample_test.py index 2d3a29f481..4c3726431c 100644 --- a/samples/snippets/get_training_pipeline_sample_test.py +++ b/samples/snippets/get_training_pipeline_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os + import get_training_pipeline_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/helpers.py b/samples/snippets/helpers.py index 15a9f31e5c..d86da0238c 100644 --- a/samples/snippets/helpers.py +++ b/samples/snippets/helpers.py @@ -19,7 +19,7 @@ def get_state(out): def wait_for_job_state( - get_job_method: Callable[[str], "proto.Message"], + get_job_method: Callable[[str], "proto.Message"], # noqa: F821 name: str, expected_state: str = "CANCELLED", timeout: int = 90, diff --git a/samples/snippets/import_data_text_classification_single_label_sample_test.py b/samples/snippets/import_data_text_classification_single_label_sample_test.py index 1ea0afaab2..afcc0786cf 100644 --- a/samples/snippets/import_data_text_classification_single_label_sample_test.py +++ b/samples/snippets/import_data_text_classification_single_label_sample_test.py @@ -13,9 +13,8 @@ # limitations under the License. -import pytest -import os -from unittest.mock import patch, mock_open, MagicMock +from unittest.mock import MagicMock, mock_open, patch + import import_data_text_classification_single_label_sample diff --git a/samples/snippets/import_data_text_entity_extraction_sample_test.py b/samples/snippets/import_data_text_entity_extraction_sample_test.py index d74c6a322c..98c5df48f9 100644 --- a/samples/snippets/import_data_text_entity_extraction_sample_test.py +++ b/samples/snippets/import_data_text_entity_extraction_sample_test.py @@ -1,11 +1,11 @@ -import pytest import os - from uuid import uuid4 + from google.cloud import aiplatform +import pytest -import import_data_text_entity_extraction_sample import delete_dataset_sample +import import_data_text_entity_extraction_sample print( f"uCAIP Library Source:\t{aiplatform.__file__}" diff --git a/samples/snippets/import_data_text_sentiment_analysis_sample_test.py b/samples/snippets/import_data_text_sentiment_analysis_sample_test.py index 3fd9b20538..b0252356f2 100644 --- 
a/samples/snippets/import_data_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/import_data_text_sentiment_analysis_sample_test.py @@ -1,11 +1,11 @@ -import pytest import os - from uuid import uuid4 + from google.cloud import aiplatform +import pytest -import import_data_text_sentiment_analysis_sample import delete_dataset_sample +import import_data_text_sentiment_analysis_sample print( f"uCAIP Library Source:\t{aiplatform.__file__}" diff --git a/samples/snippets/import_data_video_action_recognition_test.py b/samples/snippets/import_data_video_action_recognition_test.py index cacb56de70..7a2a58b752 100644 --- a/samples/snippets/import_data_video_action_recognition_test.py +++ b/samples/snippets/import_data_video_action_recognition_test.py @@ -13,15 +13,14 @@ # limitations under the License. -import pytest import os - import uuid + from google.cloud import aiplatform +import pytest import import_data_video_action_recognition_sample - PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" GCS_SOURCE = "gs://automl-video-demo-data/ucaip-var/swimrun.jsonl" @@ -56,7 +55,7 @@ def teardown(shared_state, dataset_client): project=PROJECT_ID, location=LOCATION, dataset=shared_state["dataset_id"] ) response = dataset_client.delete_dataset(name=dataset_name) - delete_dataset_response = response.result(timeout=120) + response.result(timeout=120) def test_import_data_video_action_recognition_sample( diff --git a/samples/snippets/import_data_video_classification_sample_test.py b/samples/snippets/import_data_video_classification_sample_test.py index 0a9a1b08c9..b9d0cea46d 100644 --- a/samples/snippets/import_data_video_classification_sample_test.py +++ b/samples/snippets/import_data_video_classification_sample_test.py @@ -13,15 +13,14 @@ # limitations under the License. 
-import pytest import os - from uuid import uuid4 + from google.cloud import aiplatform +import pytest -import import_data_video_classification_sample import delete_dataset_sample - +import import_data_video_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/import_data_video_object_tracking_sample_test.py b/samples/snippets/import_data_video_object_tracking_sample_test.py index 457bf869b5..22b2e61710 100644 --- a/samples/snippets/import_data_video_object_tracking_sample_test.py +++ b/samples/snippets/import_data_video_object_tracking_sample_test.py @@ -13,15 +13,14 @@ # limitations under the License. -import pytest import os - from uuid import uuid4 + from google.cloud import aiplatform +import pytest -import import_data_video_object_tracking_sample import delete_dataset_sample - +import import_data_video_object_tracking_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" diff --git a/samples/snippets/list_model_evaluation_slices_sample_test.py b/samples/snippets/list_model_evaluation_slices_sample_test.py index 9e7e3823c0..b5833ba90d 100644 --- a/samples/snippets/list_model_evaluation_slices_sample_test.py +++ b/samples/snippets/list_model_evaluation_slices_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import list_model_evaluation_slices_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/predict_custom_trained_model_sample.py b/samples/snippets/predict_custom_trained_model_sample.py index 308b0956fd..46fd81f03b 100644 --- a/samples/snippets/predict_custom_trained_model_sample.py +++ b/samples/snippets/predict_custom_trained_model_sample.py @@ -13,10 +13,11 @@ # limitations under the License. 
# [START aiplatform_predict_custom_trained_model_sample] +from typing import Dict + from google.cloud import aiplatform from google.protobuf import json_format from google.protobuf.struct_pb2 import Value -from typing import Dict def predict_custom_trained_model_sample( diff --git a/samples/snippets/predict_custom_trained_model_sample_test.py b/samples/snippets/predict_custom_trained_model_sample_test.py index 2bb19c4c95..9b3a60df2a 100644 --- a/samples/snippets/predict_custom_trained_model_sample_test.py +++ b/samples/snippets/predict_custom_trained_model_sample_test.py @@ -12,13 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest -import os import base64 +import os import pathlib -import predict_custom_trained_model_sample +import predict_custom_trained_model_sample ENDPOINT_ID = "6119547468666372096" # permanent_custom_flowers_model PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/predict_image_classification_sample.py b/samples/snippets/predict_image_classification_sample.py index d90dd3732b..cee06ac5c3 100644 --- a/samples/snippets/predict_image_classification_sample.py +++ b/samples/snippets/predict_image_classification_sample.py @@ -13,10 +13,11 @@ # limitations under the License. 
# [START aiplatform_predict_image_classification_sample] +import base64 + from google.cloud import aiplatform from google.protobuf import json_format from google.protobuf.struct_pb2 import Value -import base64 def predict_image_classification_sample( diff --git a/samples/snippets/predict_image_classification_sample_test.py b/samples/snippets/predict_image_classification_sample_test.py index 828700c318..10e72bb386 100644 --- a/samples/snippets/predict_image_classification_sample_test.py +++ b/samples/snippets/predict_image_classification_sample_test.py @@ -12,13 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os -import base64 import pathlib -import predict_image_classification_sample +import predict_image_classification_sample ENDPOINT_ID = "71213169107795968" # permanent_50_flowers_endpoint PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/predict_image_object_detection_sample.py b/samples/snippets/predict_image_object_detection_sample.py index 4d3137830f..6fb7e065f8 100644 --- a/samples/snippets/predict_image_object_detection_sample.py +++ b/samples/snippets/predict_image_object_detection_sample.py @@ -13,10 +13,11 @@ # limitations under the License. # [START aiplatform_predict_image_object_detection_sample] +import base64 + from google.cloud import aiplatform from google.protobuf import json_format from google.protobuf.struct_pb2 import Value -import base64 def predict_image_object_detection_sample( diff --git a/samples/snippets/predict_image_object_detection_sample_test.py b/samples/snippets/predict_image_object_detection_sample_test.py index 4cb4c5bdc2..c4f4893a87 100644 --- a/samples/snippets/predict_image_object_detection_sample_test.py +++ b/samples/snippets/predict_image_object_detection_sample_test.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os -import base64 import pathlib + import predict_image_object_detection_sample ENDPOINT_ID = "2791387344039575552" # permanent_salad_img_obj_detection_endpoint diff --git a/samples/snippets/predict_sample.py b/samples/snippets/predict_sample.py index d3a4a21f51..756077e25e 100644 --- a/samples/snippets/predict_sample.py +++ b/samples/snippets/predict_sample.py @@ -14,10 +14,11 @@ # [START aiplatform_predict_tutorial] # [START aiplatform_predict_sample] +from typing import Dict + from google.cloud import aiplatform from google.protobuf import json_format from google.protobuf.struct_pb2 import Value -from typing import Dict def predict_sample( diff --git a/samples/snippets/predict_tabular_classification_sample.py b/samples/snippets/predict_tabular_classification_sample.py index fb09770789..31533a0b79 100644 --- a/samples/snippets/predict_tabular_classification_sample.py +++ b/samples/snippets/predict_tabular_classification_sample.py @@ -13,10 +13,11 @@ # limitations under the License. # [START aiplatform_predict_tabular_classification_sample] +from typing import Dict + from google.cloud import aiplatform from google.protobuf import json_format from google.protobuf.struct_pb2 import Value -from typing import Dict def predict_tabular_classification_sample( diff --git a/samples/snippets/predict_tabular_classification_sample_test.py b/samples/snippets/predict_tabular_classification_sample_test.py index db3410a315..51eed490bd 100644 --- a/samples/snippets/predict_tabular_classification_sample_test.py +++ b/samples/snippets/predict_tabular_classification_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os + import predict_tabular_classification_sample ENDPOINT_ID = "4966625964059525120" # iris 1000 diff --git a/samples/snippets/predict_tabular_regression_sample.py b/samples/snippets/predict_tabular_regression_sample.py index 667a10ecd4..475b02432b 100644 --- a/samples/snippets/predict_tabular_regression_sample.py +++ b/samples/snippets/predict_tabular_regression_sample.py @@ -13,10 +13,11 @@ # limitations under the License. # [START aiplatform_predict_tabular_regression_sample] +from typing import Dict + from google.cloud import aiplatform from google.protobuf import json_format from google.protobuf.struct_pb2 import Value -from typing import Dict def predict_tabular_regression_sample( diff --git a/samples/snippets/predict_tabular_regression_sample_test.py b/samples/snippets/predict_tabular_regression_sample_test.py index 12af6022fc..0d98347f35 100644 --- a/samples/snippets/predict_tabular_regression_sample_test.py +++ b/samples/snippets/predict_tabular_regression_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import predict_tabular_regression_sample ENDPOINT_ID = "1014154341088493568" # bq all diff --git a/samples/snippets/predict_text_classification_single_label_sample_test.py b/samples/snippets/predict_text_classification_single_label_sample_test.py index d4c6d88a9d..be502d525d 100644 --- a/samples/snippets/predict_text_classification_single_label_sample_test.py +++ b/samples/snippets/predict_text_classification_single_label_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest import os + import predict_text_classification_single_label_sample ENDPOINT_ID = "65372563341049856" # text_classification_single_label diff --git a/samples/snippets/predict_text_entity_extraction_sample_test.py b/samples/snippets/predict_text_entity_extraction_sample_test.py index 4207b42396..cc9236afd9 100644 --- a/samples/snippets/predict_text_entity_extraction_sample_test.py +++ b/samples/snippets/predict_text_entity_extraction_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import predict_text_entity_extraction_sample ENDPOINT_ID = "6207156555167563776" # text_entity_extraction endpoint diff --git a/samples/snippets/predict_text_sentiment_analysis_sample_test.py b/samples/snippets/predict_text_sentiment_analysis_sample_test.py index e66595b944..b13969224f 100644 --- a/samples/snippets/predict_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/predict_text_sentiment_analysis_sample_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest import os + import predict_text_sentiment_analysis_sample ENDPOINT_ID = "7811563922418302976" # sentiment analysis endpoint diff --git a/samples/snippets/upload_model_sample_test.py b/samples/snippets/upload_model_sample_test.py index 7cc9635de6..78943f666e 100644 --- a/samples/snippets/upload_model_sample_test.py +++ b/samples/snippets/upload_model_sample_test.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os from uuid import uuid4 + import pytest -import os +import delete_model_sample import helpers - import upload_model_sample -import delete_model_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") IMAGE_URI = "gcr.io/cloud-ml-service-public/cloud-ml-online-prediction-model-server-cpu:v1_15py3cmle_op_images_20200229_0210_RC00" From 96a850f2d24d7ae95f2cdec83a56362abecb85a2 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 1 Dec 2020 13:55:27 -0800 Subject: [PATCH 13/34] feat: add create_batch_prediction_job samples (#67) * chore: sample tests lint * lint * lnt * lint * feat: add create_batch_prediction_job samples * lint --- ...te_batch_prediction_job_bigquery_sample.py | 62 ++++++++++++++ ...tch_prediction_job_bigquery_sample_test.py | 82 ++++++++++++++++++ .../create_batch_prediction_job_sample.py | 69 +++++++++++++++ ...create_batch_prediction_job_sample_test.py | 83 +++++++++++++++++++ 4 files changed, 296 insertions(+) create mode 100644 samples/snippets/create_batch_prediction_job_bigquery_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_bigquery_sample_test.py create mode 100644 samples/snippets/create_batch_prediction_job_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_sample_test.py diff --git a/samples/snippets/create_batch_prediction_job_bigquery_sample.py b/samples/snippets/create_batch_prediction_job_bigquery_sample.py new file mode 100644 index 0000000000..7747333cab --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_bigquery_sample.py @@ -0,0 +1,62 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_create_batch_prediction_job_bigquery_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_batch_prediction_job_bigquery_sample( + project: str, + display_name: str, + model_name: str, + instances_format: str, + bigquery_source_input_uri: str, + predictions_format: str, + bigquery_destination_output_uri: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.JobServiceClient(client_options=client_options) + model_parameters_dict = {} + model_parameters = json_format.ParseDict(model_parameters_dict, Value()) + + batch_prediction_job = { + "display_name": display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + "model": model_name, + "model_parameters": model_parameters, + "input_config": { + "instances_format": instances_format, + "bigquery_source": {"input_uri": bigquery_source_input_uri}, + }, + "output_config": { + "predictions_format": predictions_format, + "bigquery_destination": {"output_uri": bigquery_destination_output_uri}, + }, + # optional + "generate_explanation": True, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_batch_prediction_job( + parent=parent, batch_prediction_job=batch_prediction_job + ) + print("response:", response) + + +# [END aiplatform_create_batch_prediction_job_bigquery_sample] diff --git a/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py b/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py new file mode 100644 index 0000000000..663180ef35 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py @@ -0,0 +1,82 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from uuid import uuid4 + +from google.cloud import aiplatform +import pytest + +import create_batch_prediction_job_bigquery_sample +import helpers + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +MODEL_ID = "3125638878883479552" # bq all +DISPLAY_NAME = f"temp_create_batch_prediction_job_test_{uuid4()}" +BIGQUERY_SOURCE_INPUT_URI = "bq://ucaip-sample-tests.table_test.all_bq_types" +BIGQUERY_DESTINATION_OUTPUT_URI = "bq://ucaip-sample-tests" +INSTANCES_FORMAT = "bigquery" +PREDICTIONS_FORMAT = "bigquery" + + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def job_client(): + job_client = aiplatform.gapic.JobServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + return job_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, job_client): + yield + + job_client.cancel_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) + + # Waiting until the job is in CANCELLED state. 
+ helpers.wait_for_job_state( + get_job_method=job_client.get_batch_prediction_job, + name=shared_state["batch_prediction_job_name"], + ) + + job_client.delete_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) + + +def test_ucaip_generated_create_batch_prediction_job_bigquery_sample( + capsys, shared_state +): + + model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}" + + create_batch_prediction_job_bigquery_sample.create_batch_prediction_job_bigquery_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + model_name=model_name, + bigquery_source_input_uri=BIGQUERY_SOURCE_INPUT_URI, + bigquery_destination_output_uri=BIGQUERY_DESTINATION_OUTPUT_URI, + instances_format=INSTANCES_FORMAT, + predictions_format=PREDICTIONS_FORMAT, + ) + + out, _ = capsys.readouterr() + + # Save resource name of the newly created batch prediction job + shared_state["batch_prediction_job_name"] = helpers.get_name(out) diff --git a/samples/snippets/create_batch_prediction_job_sample.py b/samples/snippets/create_batch_prediction_job_sample.py new file mode 100644 index 0000000000..ea89e7b885 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_sample.py @@ -0,0 +1,69 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_create_batch_prediction_job_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_batch_prediction_job_sample( + project: str, + display_name: str, + model_name: str, + instances_format: str, + gcs_source_uri: str, + predictions_format: str, + gcs_destination_output_uri_prefix: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.JobServiceClient(client_options=client_options) + model_parameters_dict = {} + model_parameters = json_format.ParseDict(model_parameters_dict, Value()) + + batch_prediction_job = { + "display_name": display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + "model": model_name, + "model_parameters": model_parameters, + "input_config": { + "instances_format": instances_format, + "gcs_source": {"uris": [gcs_source_uri]}, + }, + "output_config": { + "predictions_format": predictions_format, + "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix}, + }, + "dedicated_resources": { + "machine_spec": { + "machine_type": "n1-standard-2", + "accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + "accelerator_count": 1, + }, + "starting_replica_count": 1, + "max_replica_count": 1, + }, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_batch_prediction_job( + parent=parent, batch_prediction_job=batch_prediction_job + ) + print("response:", response) + + +# [END aiplatform_create_batch_prediction_job_sample] diff --git a/samples/snippets/create_batch_prediction_job_sample_test.py b/samples/snippets/create_batch_prediction_job_sample_test.py new file mode 
100644 index 0000000000..6804928024 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_sample_test.py @@ -0,0 +1,83 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from uuid import uuid4 + +from google.cloud import aiplatform +import pytest + +import create_batch_prediction_job_sample +import helpers + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +MODEL_ID = "1478306577684365312" # Permanent 50 flowers model +DISPLAY_NAME = f"temp_create_batch_prediction_job_test_{uuid4()}" +GCS_SOURCE_URI = ( + "gs://ucaip-samples-test-output/inputs/icn_batch_prediction_input.jsonl" +) +GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" +INSTANCES_FORMAT = "jsonl" +PREDICTIONS_FORMAT = "jsonl" + + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def job_client(): + job_client = aiplatform.gapic.JobServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + return job_client + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state, job_client): + yield + + job_client.cancel_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) + + # Waiting until the job is in CANCELLED state. 
+ helpers.wait_for_job_state( + get_job_method=job_client.get_batch_prediction_job, + name=shared_state["batch_prediction_job_name"], + ) + + job_client.delete_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) + + +# Creating AutoML Vision Classification batch prediction job +def test_ucaip_generated_create_batch_prediction_sample(capsys, shared_state): + + model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}" + + create_batch_prediction_job_sample.create_batch_prediction_job_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + model_name=model_name, + gcs_source_uri=GCS_SOURCE_URI, + gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, + instances_format=INSTANCES_FORMAT, + predictions_format=PREDICTIONS_FORMAT, + ) + + out, _ = capsys.readouterr() + + # Save resource name of the newly created batch prediction job + shared_state["batch_prediction_job_name"] = helpers.get_name(out) From e5ddd3caee3db347a233bd2678c5b4cd8d8657bc Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Tue, 1 Dec 2020 14:30:05 -0800 Subject: [PATCH 14/34] chore: updates synth.py to use Bazel generator (#88) --- synth.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/synth.py b/synth.py index 935bf20fce..8685e21af7 100644 --- a/synth.py +++ b/synth.py @@ -20,9 +20,7 @@ import synthtool.gcp as gcp from synthtool.languages import python -# Use the microgenerator for now since we want to pin the generator version. 
-# gapic = gcp.GAPICBazel() -gapic = gcp.GAPICMicrogenerator() +gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() @@ -30,12 +28,11 @@ # Generate AI Platform GAPIC layer # ---------------------------------------------------------------------------- -# library = gapic.py_library( -# service="aiplatform", -# version="v1beta1", -# bazel_target="//google/cloud/aiplatform/v1beta1:aiplatform-v1beta1-py", -# ) -library = gapic.py_library("aiplatform", "v1beta1") +library = gapic.py_library( + service="aiplatform", + version="v1beta1", + bazel_target="//google/cloud/aiplatform/v1beta1:aiplatform-v1beta1-py", +) s.move( library, From e6acf3781fe52eaa59dad97ccddeecfbe90c60d5 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Tue, 1 Dec 2020 17:07:24 -0700 Subject: [PATCH 15/34] chore: require samples checks (#101) * chore: require samples checks * Update sync-repo-settings.yaml --- .github/sync-repo-settings.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .github/sync-repo-settings.yaml diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml new file mode 100644 index 0000000000..4930eaccc6 --- /dev/null +++ b/.github/sync-repo-settings.yaml @@ -0,0 +1,11 @@ +# https://github.com/googleapis/repo-automation-bots/tree/master/packages/sync-repo-settings +# Rules for master branch protection +branchProtectionRules: +# Identifies the protection rule pattern. Name of the branch to be protected. 
+# Defaults to `master` +- pattern: master + requiredStatusCheckContexts: + - 'Kokoro' + - 'cla/google' + - 'Samples - Lint' + - 'Samples - Python 3.7' From 5362a4d80ec00b8b8826e71ccd6bf69fbd19b921 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 4 Dec 2020 13:11:52 -0800 Subject: [PATCH 16/34] chore: update sample test resouce names (#120) * chore: update sample test resource names --- .../create_batch_prediction_job_bigquery_sample_test.py | 2 +- samples/snippets/create_batch_prediction_job_sample_test.py | 2 +- ..._batch_prediction_job_text_classification_sample_test.py | 2 +- ...tch_prediction_job_text_entity_extraction_sample_test.py | 2 +- ...batch_prediction_job_video_classification_sample_test.py | 2 +- ...atch_prediction_job_video_object_tracking_sample_test.py | 2 +- .../export_model_tabular_classification_sample_test.py | 2 +- samples/snippets/get_model_evaluation_sample_test.py | 4 ++-- samples/snippets/get_model_evaluation_slice_sample_test.py | 6 +++--- ...t_model_evaluation_tabular_classification_sample_test.py | 4 ++-- .../get_model_evaluation_tabular_regression_sample_test.py | 4 ++-- ...get_model_evaluation_video_classification_sample_test.py | 4 ++-- ...et_model_evaluation_video_object_tracking_sample_test.py | 4 ++-- samples/snippets/get_model_sample_test.py | 2 +- .../import_data_text_sentiment_analysis_sample_test.py | 2 +- .../snippets/import_data_video_action_recognition_test.py | 2 +- .../snippets/list_model_evaluation_slices_sample_test.py | 6 +++--- 17 files changed, 26 insertions(+), 26 deletions(-) diff --git a/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py b/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py index 663180ef35..b5c9b1d98e 100644 --- a/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py @@ -23,7 +23,7 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" 
-MODEL_ID = "3125638878883479552" # bq all +MODEL_ID = "8842430840248991744" # bq all DISPLAY_NAME = f"temp_create_batch_prediction_job_test_{uuid4()}" BIGQUERY_SOURCE_INPUT_URI = "bq://ucaip-sample-tests.table_test.all_bq_types" BIGQUERY_DESTINATION_OUTPUT_URI = "bq://ucaip-sample-tests" diff --git a/samples/snippets/create_batch_prediction_job_sample_test.py b/samples/snippets/create_batch_prediction_job_sample_test.py index 6804928024..62a03aefc7 100644 --- a/samples/snippets/create_batch_prediction_job_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_sample_test.py @@ -23,7 +23,7 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" -MODEL_ID = "1478306577684365312" # Permanent 50 flowers model +MODEL_ID = "3512561418744365056" # Permanent 50 flowers model DISPLAY_NAME = f"temp_create_batch_prediction_job_test_{uuid4()}" GCS_SOURCE_URI = ( "gs://ucaip-samples-test-output/inputs/icn_batch_prediction_input.jsonl" diff --git a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py index 003b858416..c1f033676d 100644 --- a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py @@ -25,7 +25,7 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" -MODEL_ID = "3863595899074641920" # Permanent restaurant rating model +MODEL_ID = "7827432074230366208" # Permanent restaurant rating model DISPLAY_NAME = f"temp_create_batch_prediction_tcn_test_{uuid4()}" GCS_SOURCE_URI = ( "gs://ucaip-samples-test-output/inputs/batch_predict_TCN/tcn_inputs.jsonl" diff --git a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py index 069730a9b5..bf920c22cf 100644 --- 
a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py @@ -25,7 +25,7 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" -MODEL_ID = "5216364637146054656" # Permanent medical entity NL model +MODEL_ID = "6305215400179138560" # Permanent medical entity NL model DISPLAY_NAME = f"temp_create_batch_prediction_ten_test_{uuid4()}" GCS_SOURCE_URI = ( "gs://ucaip-samples-test-output/inputs/batch_predict_TEN/ten_inputs.jsonl" diff --git a/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py b/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py index f9344ab6ec..d62d684abe 100644 --- a/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py @@ -25,7 +25,7 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" -MODEL_ID = "667940119734386688" # Permanent 5 class sports model +MODEL_ID = "8596984660557299712" # Permanent 5 class sports model DISPLAY_NAME = f"temp_create_batch_prediction_vcn_test_{uuid4()}" GCS_SOURCE_URI = ( "gs://ucaip-samples-test-output/inputs/vcn_40_batch_prediction_input.jsonl" diff --git a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py index 46ace425d6..3946db4e69 100644 --- a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py @@ -25,7 +25,7 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" -MODEL_ID = "20547673299877888" # Permanent horses model +MODEL_ID = "8609932509485989888" # Permanent horses model DISPLAY_NAME = 
f"temp_create_batch_prediction_vot_test_{uuid4()}" GCS_SOURCE_URI = ( "gs://ucaip-samples-test-output/inputs/vot_batch_prediction_input.jsonl" diff --git a/samples/snippets/export_model_tabular_classification_sample_test.py b/samples/snippets/export_model_tabular_classification_sample_test.py index 90b885ba2d..4145f7d050 100644 --- a/samples/snippets/export_model_tabular_classification_sample_test.py +++ b/samples/snippets/export_model_tabular_classification_sample_test.py @@ -21,7 +21,7 @@ import export_model_tabular_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "5359002081594179584" # iris 1000 +MODEL_ID = "6036688272397172736" # iris 1000 GCS_BUCKET = "gs://ucaip-samples-test-output" GCS_PREFIX = f"tmp/export_model_test_{uuid4()}" diff --git a/samples/snippets/get_model_evaluation_sample_test.py b/samples/snippets/get_model_evaluation_sample_test.py index 26490f4b07..57e1b95f63 100644 --- a/samples/snippets/get_model_evaluation_sample_test.py +++ b/samples/snippets/get_model_evaluation_sample_test.py @@ -18,8 +18,8 @@ import get_model_evaluation_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "5162251072873431040" # permanent_safe_driver_model -EVALUATION_ID = "5615675837586029221" # permanent_safe_driver_model Evaluation +MODEL_ID = "3512561418744365056" # permanent_safe_driver_model +EVALUATION_ID = "9035588644970168320" # permanent_safe_driver_model Evaluation def test_ucaip_generated_get_model_evaluation_sample(capsys): diff --git a/samples/snippets/get_model_evaluation_slice_sample_test.py b/samples/snippets/get_model_evaluation_slice_sample_test.py index 9ce73344e8..3e2cea3e5e 100644 --- a/samples/snippets/get_model_evaluation_slice_sample_test.py +++ b/samples/snippets/get_model_evaluation_slice_sample_test.py @@ -18,9 +18,9 @@ import get_model_evaluation_slice_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "5162251072873431040" # permanent_safe_driver_model 
-EVALUATION_ID = "5615675837586029221" # permanent_safe_driver_model Evaluation -SLICE_ID = "4322488217836113260" # permanent_safe_driver_model Eval Slice +MODEL_ID = "3512561418744365056" # permanent_safe_driver_model +EVALUATION_ID = "9035588644970168320" # permanent_safe_driver_model Evaluation +SLICE_ID = "6481571820677004173" # permanent_safe_driver_model Eval Slice def test_ucaip_generated_get_model_evaluation_slice_sample(capsys): diff --git a/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py b/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py index e5e8cbc27a..2d9ddb7356 100644 --- a/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py +++ b/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py @@ -18,8 +18,8 @@ import get_model_evaluation_tabular_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "5162251072873431040" # permanent_safe_driver_model -EVALUATION_ID = "5615675837586029221" # permanent_safe_driver_model Evaluation +MODEL_ID = "6036688272397172736" # permanent_safe_driver_model +EVALUATION_ID = "1866113044163962838" # permanent_safe_driver_model Evaluation def test_ucaip_generated_get_model_evaluation_tabular_classification_sample(capsys): diff --git a/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py b/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py index 97f539d3f8..2481021cd3 100644 --- a/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py +++ b/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py @@ -18,8 +18,8 @@ import get_model_evaluation_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "3125638878883479552" # bq all -EVALUATION_ID = "2025948722346981108" # bq all evaluation +MODEL_ID = "8842430840248991744" # bq all +EVALUATION_ID = "4944816689650806017" # bq all evaluation def 
test_ucaip_generated_get_model_evaluation_sample(capsys): diff --git a/samples/snippets/get_model_evaluation_video_classification_sample_test.py b/samples/snippets/get_model_evaluation_video_classification_sample_test.py index f1de95418c..01c0b4cefa 100644 --- a/samples/snippets/get_model_evaluation_video_classification_sample_test.py +++ b/samples/snippets/get_model_evaluation_video_classification_sample_test.py @@ -18,8 +18,8 @@ import get_model_evaluation_video_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "667940119734386688" # Permanent sports model -EVALUATION_ID = "789396572185034752" # Permanent sports evaluation +MODEL_ID = "8596984660557299712" # Permanent sports model +EVALUATION_ID = "7092045712224944128" # Permanent sports evaluation def test_ucaip_generated_get_model_evaluation_sample(capsys): diff --git a/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py b/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py index b9d972029f..e924040d42 100644 --- a/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py +++ b/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py @@ -18,8 +18,8 @@ import get_model_evaluation_video_object_tracking_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "20547673299877888" # Permanent horses model -EVALUATION_ID = "1165447141070471168" # Permanent horses evaluation +MODEL_ID = "8609932509485989888" # Permanent horses model +EVALUATION_ID = "6016811301190238208" # Permanent horses evaluation def test_ucaip_generated_get_model_evaluation_sample(capsys): diff --git a/samples/snippets/get_model_sample_test.py b/samples/snippets/get_model_sample_test.py index a6990d38b5..8c3afb86d9 100644 --- a/samples/snippets/get_model_sample_test.py +++ b/samples/snippets/get_model_sample_test.py @@ -18,7 +18,7 @@ import get_model_sample PROJECT_ID = 
os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "1478306577684365312" # permanent_50_flowers_model +MODEL_ID = "3512561418744365056" # permanent_50_flowers_model KNOWN_MODEL = f"/locations/us-central1/models/{MODEL_ID}" diff --git a/samples/snippets/import_data_text_sentiment_analysis_sample_test.py b/samples/snippets/import_data_text_sentiment_analysis_sample_test.py index b0252356f2..76f8512475 100644 --- a/samples/snippets/import_data_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/import_data_text_sentiment_analysis_sample_test.py @@ -36,7 +36,7 @@ def dataset_name(): parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset ) - created_dataset = operation.result(timeout=120) + created_dataset = operation.result(timeout=600) yield created_dataset.name diff --git a/samples/snippets/import_data_video_action_recognition_test.py b/samples/snippets/import_data_video_action_recognition_test.py index 7a2a58b752..f8912bfa7a 100644 --- a/samples/snippets/import_data_video_action_recognition_test.py +++ b/samples/snippets/import_data_video_action_recognition_test.py @@ -70,7 +70,7 @@ def test_import_data_video_action_recognition_sample( parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset ) - create_dataset_response = response.result(timeout=120) + create_dataset_response = response.result(timeout=600) shared_state["dataset_name"] = create_dataset_response.name shared_state["dataset_id"] = create_dataset_response.name.split("/")[-1] diff --git a/samples/snippets/list_model_evaluation_slices_sample_test.py b/samples/snippets/list_model_evaluation_slices_sample_test.py index b5833ba90d..4b759c51f8 100644 --- a/samples/snippets/list_model_evaluation_slices_sample_test.py +++ b/samples/snippets/list_model_evaluation_slices_sample_test.py @@ -18,10 +18,10 @@ import list_model_evaluation_slices_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -MODEL_ID = "5162251072873431040" -EVALUATION_ID = "5615675837586029221" 
+MODEL_ID = "3512561418744365056" +EVALUATION_ID = "9035588644970168320" -KNOWN_EVALUATION_SLICE = "/locations/us-central1/models/5162251072873431040/evaluations/5615675837586029221/slices/4322488217836113260" +KNOWN_EVALUATION_SLICE = "projects/580378083368/locations/us-central1/models/3512561418744365056/evaluations/9035588644970168320/slices/6481571820677004173" def test_ucaip_generated_get_model_evaluation_slices_sample(capsys): From 5ddbf16f35234dc1781de9d17310a345ac1524de Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Fri, 4 Dec 2020 16:39:58 -0800 Subject: [PATCH 17/34] feat: initial generation of enhanced types (#102) * feat: initial generation of enhanced types --- docs/conf.py | 6 +- docs/definition_v1beta1/types.rst | 6 + docs/instance_v1beta1/types.rst | 6 + docs/params_v1beta1/types.rst | 6 + docs/prediction_v1beta1/types.rst | 6 + .../schema/predict/instance/__init__.py | 56 ++ .../v1beta1/schema/predict/instance/py.typed | 2 + .../predict/instance_v1beta1/__init__.py | 39 ++ .../schema/predict/instance_v1beta1/py.typed | 2 + .../instance_v1beta1/types/__init__.py | 39 ++ .../types/image_classification.py | 51 ++ .../types/image_object_detection.py | 51 ++ .../types/image_segmentation.py | 45 ++ .../types/text_classification.py | 44 ++ .../instance_v1beta1/types/text_extraction.py | 55 ++ .../instance_v1beta1/types/text_sentiment.py | 44 ++ .../types/video_action_recognition.py | 64 +++ .../types/video_classification.py | 64 +++ .../types/video_object_tracking.py | 64 +++ .../v1beta1/schema/predict/params/__init__.py | 44 ++ .../v1beta1/schema/predict/params/py.typed | 2 + .../schema/predict/params_v1beta1/__init__.py | 33 ++ .../schema/predict/params_v1beta1/py.typed | 2 + .../predict/params_v1beta1/types/__init__.py | 33 ++ .../types/image_classification.py | 47 ++ .../types/image_object_detection.py | 48 ++ .../types/image_segmentation.py | 42 ++ .../types/video_action_recognition.py | 48 ++ .../types/video_classification.py | 85 +++ 
.../types/video_object_tracking.py | 54 ++ .../schema/predict/prediction/__init__.py | 64 +++ .../schema/predict/prediction/py.typed | 2 + .../predict/prediction_v1beta1/__init__.py | 43 ++ .../predict/prediction_v1beta1/py.typed | 2 + .../prediction_v1beta1/types/__init__.py | 43 ++ .../types/classification.py | 51 ++ .../types/image_object_detection.py | 64 +++ .../types/image_segmentation.py | 57 ++ .../types/tabular_classification.py | 47 ++ .../types/tabular_regression.py | 46 ++ .../types/text_extraction.py | 67 +++ .../types/text_sentiment.py | 68 +++ .../types/time_series_forecasting.py | 46 ++ .../types/video_action_recognition.py | 74 +++ .../types/video_classification.py | 90 ++++ .../types/video_object_tracking.py | 115 +++++ .../schema/trainingjob/definition/__init__.py | 132 +++++ .../schema/trainingjob/definition/py.typed | 2 + .../definition_v1beta1/__init__.py | 77 +++ .../trainingjob/definition_v1beta1/py.typed | 2 + .../definition_v1beta1/types/__init__.py | 99 ++++ .../types/automl_forecasting.py | 486 ++++++++++++++++++ .../types/automl_image_classification.py | 143 ++++++ .../types/automl_image_object_detection.py | 128 +++++ .../types/automl_image_segmentation.py | 121 +++++ .../definition_v1beta1/types/automl_tables.py | 447 ++++++++++++++++ .../types/automl_text_classification.py | 52 ++ .../types/automl_text_extraction.py | 43 ++ .../types/automl_text_sentiment.py | 59 +++ .../types/automl_video_action_recognition.py | 58 +++ .../types/automl_video_classification.py | 58 +++ .../types/automl_video_object_tracking.py | 62 +++ .../export_evaluated_data_items_config.py | 52 ++ google/cloud/aiplatform_v1beta1/__init__.py | 4 +- .../services/dataset_service/async_client.py | 20 +- .../dataset_service/transports/base.py | 20 +- .../services/endpoint_service/async_client.py | 14 +- .../endpoint_service/transports/base.py | 14 +- .../services/job_service/async_client.py | 40 +- .../services/job_service/transports/base.py | 40 +- 
.../services/model_service/async_client.py | 20 +- .../services/model_service/transports/base.py | 22 +- .../services/pipeline_service/async_client.py | 10 +- .../pipeline_service/transports/base.py | 10 +- .../prediction_service/async_client.py | 4 +- .../prediction_service/transports/base.py | 4 +- .../specialist_pool_service/async_client.py | 10 +- .../transports/base.py | 10 +- .../aiplatform_v1beta1/types/__init__.py | 412 +++++++-------- noxfile.py | 7 +- synth.metadata | 15 +- synth.py | 14 + 82 files changed, 4339 insertions(+), 339 deletions(-) create mode 100644 docs/definition_v1beta1/types.rst create mode 100644 docs/instance_v1beta1/types.rst create mode 100644 docs/params_v1beta1/types.rst create mode 100644 docs/prediction_v1beta1/types.rst create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py create mode 100644 
google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py create mode 100644 
google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py create mode 100644 
google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py diff --git a/docs/conf.py b/docs/conf.py index effa4a8f1f..98e68be241 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -345,10 +345,10 @@ # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "python": ("https://python.readthedocs.org/en/latest/", None), + "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), - "grpc": ("https://grpc.io/grpc/python/", None), + "grpc": ("https://grpc.github.io/grpc/python/", None), "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), } diff --git a/docs/definition_v1beta1/types.rst b/docs/definition_v1beta1/types.rst new file mode 100644 index 0000000000..3f351d03fc --- /dev/null +++ b/docs/definition_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API +=================================================================================== + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types + :members: + :show-inheritance: diff --git a/docs/instance_v1beta1/types.rst b/docs/instance_v1beta1/types.rst new file mode 100644 index 0000000000..c52ae4800c --- /dev/null +++ b/docs/instance_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +============================================================================= + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types + :members: + :show-inheritance: diff --git a/docs/params_v1beta1/types.rst b/docs/params_v1beta1/types.rst new file mode 100644 index 0000000000..ce7a29cb01 --- /dev/null +++ b/docs/params_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +=========================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types + :members: + :show-inheritance: diff --git a/docs/prediction_v1beta1/types.rst b/docs/prediction_v1beta1/types.rst new file mode 100644 index 0000000000..cdbe7f2842 --- /dev/null +++ b/docs/prediction_v1beta1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +=============================================================================== + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types + :members: + :show-inheritance: diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..2f514ac4ed --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ( + ImageClassificationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import ( + TextClassificationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import ( + TextExtractionPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import ( + TextSentimentPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import ( + VideoClassificationPredictionInstance, +) +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + "ImageClassificationPredictionInstance", + "ImageObjectDetectionPredictionInstance", + "ImageSegmentationPredictionInstance", + "TextClassificationPredictionInstance", + "TextExtractionPredictionInstance", + "TextSentimentPredictionInstance", + "VideoActionRecognitionPredictionInstance", + "VideoClassificationPredictionInstance", + "VideoObjectTrackingPredictionInstance", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ 
b/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..f6d9a128ad --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + + +__all__ = ( + "ImageObjectDetectionPredictionInstance", + "ImageSegmentationPredictionInstance", + "TextClassificationPredictionInstance", + "TextExtractionPredictionInstance", + "TextSentimentPredictionInstance", + "VideoActionRecognitionPredictionInstance", + "VideoClassificationPredictionInstance", + "VideoObjectTrackingPredictionInstance", + "ImageClassificationPredictionInstance", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py new file mode 100644 index 0000000000..3160c08e1d --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .image_classification import ImageClassificationPredictionInstance +from .image_object_detection import ImageObjectDetectionPredictionInstance +from .image_segmentation import ImageSegmentationPredictionInstance +from .text_classification import TextClassificationPredictionInstance +from .text_extraction import TextExtractionPredictionInstance +from .text_sentiment import TextSentimentPredictionInstance +from .video_action_recognition import VideoActionRecognitionPredictionInstance +from .video_classification import VideoClassificationPredictionInstance +from .video_object_tracking import VideoObjectTrackingPredictionInstance + + +__all__ = ( + "ImageClassificationPredictionInstance", + "ImageObjectDetectionPredictionInstance", + "ImageSegmentationPredictionInstance", + "TextClassificationPredictionInstance", + "TextExtractionPredictionInstance", + "TextSentimentPredictionInstance", + "VideoActionRecognitionPredictionInstance", + "VideoClassificationPredictionInstance", + "VideoObjectTrackingPredictionInstance", +) 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..84b1ef0bbe --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"ImageClassificationPredictionInstance",}, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. + + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..79c3efc2c6 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"ImageObjectDetectionPredictionInstance",}, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + + Attributes: + content (str): + The image bytes or GCS URI to make the + prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..5a3232c6d2 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"ImageSegmentationPredictionInstance",}, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/png + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py new file mode 100644 index 0000000000..a615dc7e49 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"TextClassificationPredictionInstance",}, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. 
+ - text/plain + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..c6fecf80b7 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"TextExtractionPredictionInstance",}, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will by mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. 
AI Platform will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + key = proto.Field(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..69836d0e96 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"TextSentimentPredictionInstance",}, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. 
+ - text/plain + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..89be6318f8 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"VideoActionRecognitionPredictionInstance",}, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. 
Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "Infinity" is + allowed, which means the end of the video. + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + time_segment_start = proto.Field(proto.STRING, number=3) + + time_segment_end = proto.Field(proto.STRING, number=4) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..41ab3bc217 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"VideoClassificationPredictionInstance",}, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. 
+ + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "Infinity" is + allowed, which means the end of the video. + """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + time_segment_start = proto.Field(proto.STRING, number=3) + + time_segment_end = proto.Field(proto.STRING, number=4) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..3729c14816 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.instance", + manifest={"VideoObjectTrackingPredictionInstance",}, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "Infinity" is + allowed, which means the end of the video. 
+ """ + + content = proto.Field(proto.STRING, number=1) + + mime_type = proto.Field(proto.STRING, number=2) + + time_segment_start = proto.Field(proto.STRING, number=3) + + time_segment_end = proto.Field(proto.STRING, number=4) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..dc7cd58e9a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ( + ImageClassificationPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ( + ImageSegmentationPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import ( + VideoClassificationPredictionParams, +) +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + "ImageClassificationPredictionParams", + "ImageObjectDetectionPredictionParams", + "ImageSegmentationPredictionParams", + "VideoActionRecognitionPredictionParams", + "VideoClassificationPredictionParams", + "VideoObjectTrackingPredictionParams", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py new file mode 100644 index 0000000000..79fb1c2097 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + + +__all__ = ( + "ImageObjectDetectionPredictionParams", + "ImageSegmentationPredictionParams", + "VideoActionRecognitionPredictionParams", + "VideoClassificationPredictionParams", + "VideoObjectTrackingPredictionParams", + "ImageClassificationPredictionParams", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker 
file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py new file mode 100644 index 0000000000..39202720fa --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .image_classification import ImageClassificationPredictionParams +from .image_object_detection import ImageObjectDetectionPredictionParams +from .image_segmentation import ImageSegmentationPredictionParams +from .video_action_recognition import VideoActionRecognitionPredictionParams +from .video_classification import VideoClassificationPredictionParams +from .video_object_tracking import VideoObjectTrackingPredictionParams + + +__all__ = ( + "ImageClassificationPredictionParams", + "ImageObjectDetectionPredictionParams", + "ImageSegmentationPredictionParams", + "VideoActionRecognitionPredictionParams", + "VideoClassificationPredictionParams", + "VideoObjectTrackingPredictionParams", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..681a8c3d87 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.params", + manifest={"ImageClassificationPredictionParams",}, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + max_predictions = proto.Field(proto.INT32, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..146dd324b7 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.params", + manifest={"ImageObjectDetectionPredictionParams",}, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + max_predictions = proto.Field(proto.INT32, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..aa11739a61 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.params", + manifest={"ImageSegmentationPredictionParams",}, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. + + Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..c1f8f9f3bc --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.params", + manifest={"VideoActionRecognitionPredictionParams",}, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + max_predictions = proto.Field(proto.INT32, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..1b8d84a7d1 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.params", + manifest={"VideoClassificationPredictionParams",}, +) + + +class VideoClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Video Classification. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is + 10,000. + segment_classification (bool): + Set to true to request segment-level + classification. AI Platform returns labels and + their confidence scores for the entire time + segment of the video that user specified in the + input instance. Default value is true + shot_classification (bool): + Set to true to request shot-level + classification. AI Platform determines the + boundaries for each camera shot in the entire + time segment of the video that user specified in + the input instance. AI Platform then returns + labels and their confidence scores for each + detected shot, along with the start and end time + of the shot. + WARNING: Model evaluation is not done for this + classification type, the quality of it depends + on the training data, but there are no metrics + provided to describe that quality. + Default value is false + one_sec_interval_classification (bool): + Set to true to request classification for a + video at one-second intervals. AI Platform + returns labels and their confidence scores for + each second of the entire time segment of the + video that user specified in the input WARNING: + Model evaluation is not done for this + classification type, the quality of it depends + on the training data, but there are no metrics + provided to describe that quality. 
Default value + is false + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + max_predictions = proto.Field(proto.INT32, number=2) + + segment_classification = proto.Field(proto.BOOL, number=3) + + shot_classification = proto.Field(proto.BOOL, number=4) + + one_sec_interval_classification = proto.Field(proto.BOOL, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..4c0b6846bc --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.params", + manifest={"VideoObjectTrackingPredictionParams",}, +) + + +class VideoObjectTrackingPredictionParams(proto.Message): + r"""Prediction model parameters for Video Object Tracking. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. 
If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + min_bounding_box_size (float): + Only bounding boxes with shortest edge at + least that long as a relative value of video + frame size are returned. Default value is 0.0. + """ + + confidence_threshold = proto.Field(proto.FLOAT, number=1) + + max_predictions = proto.Field(proto.INT32, number=2) + + min_bounding_box_size = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..4447d3770a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ( + ClassificationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ( + ImageSegmentationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import ( + TabularClassificationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import ( + TabularRegressionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import ( + TextExtractionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import ( + TextSentimentPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import ( + TimeSeriesForecastingPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import ( + VideoClassificationPredictionResult, +) +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + "ClassificationPredictionResult", + "ImageObjectDetectionPredictionResult", + "ImageSegmentationPredictionResult", + "TabularClassificationPredictionResult", + "TabularRegressionPredictionResult", + "TextExtractionPredictionResult", + "TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", + 
"VideoActionRecognitionPredictionResult", + "VideoClassificationPredictionResult", + "VideoObjectTrackingPredictionResult", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..37066cd8b3 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + + +__all__ = ( + "ImageObjectDetectionPredictionResult", + "ImageSegmentationPredictionResult", + "TabularClassificationPredictionResult", + "TabularRegressionPredictionResult", + "TextExtractionPredictionResult", + "TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", + "VideoActionRecognitionPredictionResult", + "VideoClassificationPredictionResult", + "VideoObjectTrackingPredictionResult", + "ClassificationPredictionResult", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py new file mode 100644 index 0000000000..2d6c8a98d3 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .classification import ClassificationPredictionResult +from .image_object_detection import ImageObjectDetectionPredictionResult +from .image_segmentation import ImageSegmentationPredictionResult +from .tabular_classification import TabularClassificationPredictionResult +from .tabular_regression import TabularRegressionPredictionResult +from .text_extraction import TextExtractionPredictionResult +from .text_sentiment import TextSentimentPredictionResult +from .time_series_forecasting import TimeSeriesForecastingPredictionResult +from .video_action_recognition import VideoActionRecognitionPredictionResult +from .video_classification import VideoClassificationPredictionResult +from .video_object_tracking import VideoObjectTrackingPredictionResult + + +__all__ = ( + "ClassificationPredictionResult", + "ImageObjectDetectionPredictionResult", + "ImageSegmentationPredictionResult", + "TabularClassificationPredictionResult", + "TabularRegressionPredictionResult", + "TextExtractionPredictionResult", + 
"TextSentimentPredictionResult", + "TimeSeriesForecastingPredictionResult", + "VideoActionRecognitionPredictionResult", + "VideoClassificationPredictionResult", + "VideoObjectTrackingPredictionResult", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py new file mode 100644 index 0000000000..3bfe82f64e --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"ClassificationPredictionResult",}, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField(proto.INT64, number=1) + + display_names = proto.RepeatedField(proto.STRING, number=2) + + confidences = proto.RepeatedField(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..1bf5002c2a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import struct_pb2 as struct # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"ImageObjectDetectionPredictionResult",}, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. 
+ confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[~.struct.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. + """ + + ids = proto.RepeatedField(proto.INT64, number=1) + + display_names = proto.RepeatedField(proto.STRING, number=2) + + confidences = proto.RepeatedField(proto.FLOAT, number=3) + + bboxes = proto.RepeatedField(proto.MESSAGE, number=4, message=struct.ListValue,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..195dea6f79 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.aiplatform.v1beta1.schema.predict.prediction",
+    manifest={"ImageSegmentationPredictionResult",},
+)
+
+
+class ImageSegmentationPredictionResult(proto.Message):
+    r"""Prediction output format for Image Segmentation.
+
+    Attributes:
+        category_mask (bytes):
+            A PNG image where each pixel in the mask
+            represents the category in which the pixel in
+            the original image was predicted to belong to.
+            The size of this image will be the same as the
+            original image. The mapping between the
+            AnnotationSpec and the color can be found in
+            model's metadata. The model will choose the most
+            likely category and if none of the categories
+            reach the confidence threshold, the pixel will
+            be marked as background.
+        confidence_mask (bytes):
+            A one channel image which is encoded as an
+            8bit lossless PNG. The size of the image will be
+            the same as the original image. For a specific
+            pixel, darker color means less confidence in
+            correctness of the category in the categoryMask
+            for the corresponding pixel. Black means no
+            confidence and white means complete confidence.
+    """
+
+    category_mask = proto.Field(proto.BYTES, number=1)
+
+    confidence_mask = proto.Field(proto.BYTES, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py
new file mode 100644
index 0000000000..4906ad59a5
--- /dev/null
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TabularClassificationPredictionResult",}, +) + + +class TabularClassificationPredictionResult(proto.Message): + r"""Prediction output format for Tabular Classification. + + Attributes: + classes (Sequence[str]): + The name of the classes being classified, + contains all possible values of the target + column. + scores (Sequence[float]): + The model's confidence in each class being + correct, higher value means higher confidence. + The N-th score corresponds to the N-th class in + classes. + """ + + classes = proto.RepeatedField(proto.STRING, number=1) + + scores = proto.RepeatedField(proto.FLOAT, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py new file mode 100644 index 0000000000..71d535c1f0 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TabularRegressionPredictionResult",}, +) + + +class TabularRegressionPredictionResult(proto.Message): + r"""Prediction output format for Tabular Regression. + + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. + """ + + value = proto.Field(proto.FLOAT, number=1) + + lower_bound = proto.Field(proto.FLOAT, number=2) + + upper_bound = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..e3c10b5d75 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TextExtractionPredictionResult",}, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField(proto.INT64, number=1) + + display_names = proto.RepeatedField(proto.STRING, number=2) + + text_segment_start_offsets = proto.RepeatedField(proto.INT64, number=3) + + text_segment_end_offsets = proto.RepeatedField(proto.INT64, number=4) + + confidences = proto.RepeatedField(proto.FLOAT, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..192e50419d --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform.v1beta1.schema.predict.instance import text_sentiment_pb2 as gcaspi_text_sentiment # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TextSentimentPredictionResult",}, +) + + +class TextSentimentPredictionResult(proto.Message): + r"""Represents a line of JSONL in the text sentiment batch + prediction output file. This is a hack to allow printing of + integer values. + + Attributes: + instance (~.gcaspi_text_sentiment.TextSentimentPredictionInstance): + User's input instance. 
+ prediction (~.gcaspp_text_sentiment.TextSentimentPredictionResult.Prediction): + The prediction result. + """ + + class Prediction(proto.Message): + r"""Prediction output format for Text Sentiment. + + Attributes: + sentiment (int): + The integer sentiment labels between 0 + (inclusive) and sentimentMax label (inclusive), + while 0 maps to the least positive sentiment and + sentimentMax maps to the most positive one. The + higher the score is, the more positive the + sentiment in the text snippet is. Note: + sentimentMax is an integer value between 1 + (inclusive) and 10 (inclusive). + """ + + sentiment = proto.Field(proto.INT32, number=1) + + instance = proto.Field( + proto.MESSAGE, + number=1, + message=gcaspi_text_sentiment.TextSentimentPredictionInstance, + ) + + prediction = proto.Field(proto.MESSAGE, number=2, message=Prediction,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py new file mode 100644 index 0000000000..38bd8e3c85 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"TimeSeriesForecastingPredictionResult",}, +) + + +class TimeSeriesForecastingPredictionResult(proto.Message): + r"""Prediction output format for Time Series Forecasting. + + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. + """ + + value = proto.Field(proto.FLOAT, number=1) + + lower_bound = proto.Field(proto.FLOAT, number=2) + + upper_bound = proto.Field(proto.FLOAT, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..f76b51899b --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"VideoActionRecognitionPredictionResult",}, +) + + +class VideoActionRecognitionPredictionResult(proto.Message): + r"""Prediction output format for Video Action Recognition. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (~.duration.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (~.duration.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (~.wrappers.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + time_segment_start = proto.Field( + proto.MESSAGE, number=4, message=duration.Duration, + ) + + time_segment_end = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,) + + confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers.FloatValue,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..469023b122 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"VideoClassificationPredictionResult",}, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. 
+ display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (~.duration.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (~.duration.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. + confidence (~.wrappers.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + type_ = proto.Field(proto.STRING, number=3) + + time_segment_start = proto.Field( + proto.MESSAGE, number=4, message=duration.Duration, + ) + + time_segment_end = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,) + + confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers.FloatValue,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..026f80a325 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import wrappers_pb2 as wrappers # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.predict.prediction", + manifest={"VideoObjectTrackingPredictionResult",}, +) + + +class VideoObjectTrackingPredictionResult(proto.Message): + r"""Prediction output format for Video Object Tracking. 
+
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (~.duration.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (~.duration.Duration):
+            The end, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (~.wrappers.FloatValue):
+            The Model's confidence in correctness of this
+            prediction, higher value means higher
+            confidence.
+        frames (Sequence[~.video_object_tracking.VideoObjectTrackingPredictionResult.Frame]):
+            All of the frames of the video in which a
+            single object instance has been detected. The
+            bounding boxes in the frames identify the same
+            object.
+    """
+
+    class Frame(proto.Message):
+        r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a
+        bounding box, i.e. the rectangle over the video frame pinpointing
+        the found AnnotationSpec. The coordinates are relative to the frame
+        size, and the point 0,0 is in the top left of the frame.
+
+        Attributes:
+            time_offset (~.duration.Duration):
+                A time (frame) of a video in which the object
+                has been detected. Expressed as a number of
+                seconds as measured from the start of the video,
+                with fractions up to a microsecond precision,
+                and with "s" appended at the end.
+            x_min (~.wrappers.FloatValue):
+                The leftmost coordinate of the bounding box.
+            x_max (~.wrappers.FloatValue):
+                The rightmost coordinate of the bounding box.
+ y_min (~.wrappers.FloatValue): + The topmost coordinate of the bounding box. + y_max (~.wrappers.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + x_min = proto.Field(proto.MESSAGE, number=2, message=wrappers.FloatValue,) + + x_max = proto.Field(proto.MESSAGE, number=3, message=wrappers.FloatValue,) + + y_min = proto.Field(proto.MESSAGE, number=4, message=wrappers.FloatValue,) + + y_max = proto.Field(proto.MESSAGE, number=5, message=wrappers.FloatValue,) + + id = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + time_segment_start = proto.Field( + proto.MESSAGE, number=3, message=duration.Duration, + ) + + time_segment_end = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) + + confidence = proto.Field(proto.MESSAGE, number=5, message=wrappers.FloatValue,) + + frames = proto.RepeatedField(proto.MESSAGE, number=6, message=Frame,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py new file mode 100644 index 0000000000..abd693172a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( + AutoMlForecasting, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( + AutoMlForecastingInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( + AutoMlForecastingMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import ( + AutoMlImageClassification, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import ( + AutoMlImageClassificationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import ( + AutoMlImageClassificationMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import ( + AutoMlImageObjectDetection, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import ( + AutoMlImageObjectDetectionInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import ( + AutoMlImageObjectDetectionMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import ( + AutoMlImageSegmentation, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import ( + AutoMlImageSegmentationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import ( + AutoMlImageSegmentationMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import ( + AutoMlTables, +) +from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import ( + AutoMlTablesInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import ( + AutoMlTablesMetadata, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import ( + AutoMlTextClassification, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import ( + AutoMlTextClassificationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import ( + AutoMlTextExtraction, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import ( + AutoMlTextExtractionInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import ( + AutoMlTextSentiment, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import ( + AutoMlTextSentimentInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import ( + AutoMlVideoActionRecognition, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import ( + AutoMlVideoActionRecognitionInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import ( + AutoMlVideoClassification, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import ( + AutoMlVideoClassificationInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import ( + AutoMlVideoObjectTracking, +) +from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import ( + AutoMlVideoObjectTrackingInputs, +) +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.export_evaluated_data_items_config import ( + ExportEvaluatedDataItemsConfig, +) + +__all__ = ( + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + "AutoMlTables", + "AutoMlTablesInputs", + "AutoMlTablesMetadata", + "AutoMlTextClassification", + "AutoMlTextClassificationInputs", + "AutoMlTextExtraction", + "AutoMlTextExtractionInputs", + "AutoMlTextSentiment", + "AutoMlTextSentimentInputs", + "AutoMlVideoActionRecognition", + "AutoMlVideoActionRecognitionInputs", + "AutoMlVideoClassification", + "AutoMlVideoClassificationInputs", + "AutoMlVideoObjectTracking", + "AutoMlVideoObjectTrackingInputs", + "ExportEvaluatedDataItemsConfig", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed new file mode 100644 index 0000000000..98af260cd7 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py new file mode 100644 index 0000000000..346ea62686 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .types.automl_forecasting import AutoMlForecasting +from .types.automl_forecasting import AutoMlForecastingInputs +from .types.automl_forecasting import AutoMlForecastingMetadata +from .types.automl_image_classification import AutoMlImageClassification +from .types.automl_image_classification import AutoMlImageClassificationInputs +from .types.automl_image_classification import AutoMlImageClassificationMetadata +from .types.automl_image_object_detection import AutoMlImageObjectDetection +from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from .types.automl_image_segmentation import AutoMlImageSegmentation +from .types.automl_image_segmentation import AutoMlImageSegmentationInputs +from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from .types.automl_tables import AutoMlTables +from .types.automl_tables import AutoMlTablesInputs +from .types.automl_tables import 
AutoMlTablesMetadata +from .types.automl_text_classification import AutoMlTextClassification +from .types.automl_text_classification import AutoMlTextClassificationInputs +from .types.automl_text_extraction import AutoMlTextExtraction +from .types.automl_text_extraction import AutoMlTextExtractionInputs +from .types.automl_text_sentiment import AutoMlTextSentiment +from .types.automl_text_sentiment import AutoMlTextSentimentInputs +from .types.automl_video_action_recognition import AutoMlVideoActionRecognition +from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from .types.automl_video_classification import AutoMlVideoClassification +from .types.automl_video_classification import AutoMlVideoClassificationInputs +from .types.automl_video_object_tracking import AutoMlVideoObjectTracking +from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + + +__all__ = ( + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + "AutoMlTables", + "AutoMlTablesInputs", + "AutoMlTablesMetadata", + "AutoMlTextClassification", + "AutoMlTextClassificationInputs", + "AutoMlTextExtraction", + "AutoMlTextExtractionInputs", + "AutoMlTextSentiment", + "AutoMlTextSentimentInputs", + "AutoMlVideoActionRecognition", + "AutoMlVideoActionRecognitionInputs", + "AutoMlVideoClassification", + "AutoMlVideoClassificationInputs", + "AutoMlVideoObjectTracking", + "AutoMlVideoObjectTrackingInputs", + "ExportEvaluatedDataItemsConfig", +) diff --git 
a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed new file mode 100644 index 0000000000..98af260cd7 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py new file mode 100644 index 0000000000..6a0e7903b2 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig +from .automl_forecasting import ( + AutoMlForecasting, + AutoMlForecastingInputs, + AutoMlForecastingMetadata, +) +from .automl_image_classification import ( + AutoMlImageClassification, + AutoMlImageClassificationInputs, + AutoMlImageClassificationMetadata, +) +from .automl_image_object_detection import ( + AutoMlImageObjectDetection, + AutoMlImageObjectDetectionInputs, + AutoMlImageObjectDetectionMetadata, +) +from .automl_image_segmentation import ( + AutoMlImageSegmentation, + AutoMlImageSegmentationInputs, + AutoMlImageSegmentationMetadata, +) +from .automl_tables import ( + AutoMlTables, + AutoMlTablesInputs, + AutoMlTablesMetadata, +) +from .automl_text_classification import ( + AutoMlTextClassification, + AutoMlTextClassificationInputs, +) +from .automl_text_extraction import ( + AutoMlTextExtraction, + AutoMlTextExtractionInputs, +) +from .automl_text_sentiment import ( + AutoMlTextSentiment, + AutoMlTextSentimentInputs, +) +from .automl_video_action_recognition import ( + AutoMlVideoActionRecognition, + AutoMlVideoActionRecognitionInputs, +) +from .automl_video_classification import ( + AutoMlVideoClassification, + AutoMlVideoClassificationInputs, +) +from .automl_video_object_tracking import ( + AutoMlVideoObjectTracking, + AutoMlVideoObjectTrackingInputs, +) + + +__all__ = ( + "ExportEvaluatedDataItemsConfig", + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + "AutoMlTables", + "AutoMlTablesInputs", + "AutoMlTablesMetadata", + "AutoMlTextClassification", + "AutoMlTextClassificationInputs", + "AutoMlTextExtraction", + 
"AutoMlTextExtractionInputs", + "AutoMlTextSentiment", + "AutoMlTextSentimentInputs", + "AutoMlVideoActionRecognition", + "AutoMlVideoActionRecognitionInputs", + "AutoMlVideoClassification", + "AutoMlVideoClassificationInputs", + "AutoMlVideoObjectTracking", + "AutoMlVideoObjectTrackingInputs", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py new file mode 100644 index 0000000000..40c549dc5f --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py @@ -0,0 +1,486 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import ( + export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config, +) + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlForecasting", + "AutoMlForecastingInputs", + "AutoMlForecastingMetadata", + }, +) + + +class AutoMlForecasting(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Forecasting + Model. + + Attributes: + inputs (~.automl_forecasting.AutoMlForecastingInputs): + The input parameters of this TrainingJob. 
+ metadata (~.automl_forecasting.AutoMlForecastingMetadata): + The metadata information. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlForecastingInputs",) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlForecastingMetadata", + ) + + +class AutoMlForecastingInputs(proto.Message): + r""" + + Attributes: + target_column (str): + The name of the column that the model is to + predict. + time_series_identifier_column (str): + The name of the column that identifies the + time series. + time_column (str): + The name of the column that identifies time + order in the time series. + transformations (Sequence[~.automl_forecasting.AutoMlForecastingInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that optimizes the value of the objective + function over the validation set. + + The supported optimization objectives: + "minimize-rmse" (default) - Minimize root- + mean-squared error (RMSE). "minimize-mae" - + Minimize mean-absolute error (MAE). "minimize- + rmsle" - Minimize root-mean-squared log error + (RMSLE). "minimize-rmspe" - Minimize root- + mean-squared percentage error (RMSPE). + "minimize-wape-mae" - Minimize the combination + of weighted absolute percentage error (WAPE) + and mean-absolute-error (MAE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. 
This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + weight_column (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. + static_columns (Sequence[str]): + Column names that should be used as static + columns. The value of these columns are static + per time series. + time_variant_past_only_columns (Sequence[str]): + Column names that should be used as time variant past only + columns. This column contains information for the given + entity (identified by the time_series_identifier_column) + that is known for the past but not the future (e.g. + population of a city in a given year, or weather on a given + day). + time_variant_past_and_future_columns (Sequence[str]): + Column names that should be used as time + variant past and future columns. This column + contains information for the given entity + (identified by the key column) that is known for + the past and the future + period (~.automl_forecasting.AutoMlForecastingInputs.Period): + Expected difference in time granularity + between rows in the data. If it is not set, the + period is inferred from data. + forecast_window_start (int): + The number of periods offset into the future as the start of + the forecast window (the window of future values to predict, + relative to the present.), where each period is one unit of + granularity as defined by the ``period`` field above. + Default to 0. Inclusive. 
+ forecast_window_end (int): + The number of periods offset into the future as the end of + the forecast window (the window of future values to predict, + relative to the present.), where each period is one unit of + granularity as defined by the ``period`` field above. + Inclusive. + past_horizon (int): + The number of periods offset into the past to restrict past + sequence, where each period is one unit of granularity as + defined by the ``period``. Default value 0 means that it + lets algorithm to define the value. Inclusive. + export_evaluated_data_items_config (~.gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + """ + + class Transformation(proto.Message): + r""" + + Attributes: + auto (~.automl_forecasting.AutoMlForecastingInputs.Transformation.AutoTransformation): + + numeric (~.automl_forecasting.AutoMlForecastingInputs.Transformation.NumericTransformation): + + categorical (~.automl_forecasting.AutoMlForecastingInputs.Transformation.CategoricalTransformation): + + timestamp (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TimestampTransformation): + + text (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TextTransformation): + + repeated_numeric (~.automl_forecasting.AutoMlForecastingInputs.Transformation.NumericArrayTransformation): + + repeated_categorical (~.automl_forecasting.AutoMlForecastingInputs.Transformation.CategoricalArrayTransformation): + + repeated_text (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TextArrayTransformation): + + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistic of dataset. 
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(proto.STRING, number=1)
+
+        class NumericTransformation(proto.Message):
+            r"""Training pipeline will perform following transformation functions.
+
+            -  The value converted to float32.
+            -  The z_score of the value.
+            -  log(value+1) when the value is greater than or equal to 0.
+               Otherwise, this transformation is not applied and the value is
+               considered a missing value.
+            -  z_score of log(value+1) when the value is greater than or equal
+               to 0. Otherwise, this transformation is not applied and the value
+               is considered a missing value.
+            -  A boolean value that indicates whether the value is valid.
+
+            Attributes:
+                column_name (str):
+
+                invalid_values_allowed (bool):
+                    If invalid values are allowed, the training
+                    pipeline will create a boolean feature that
+                    indicates whether the value is valid. Otherwise,
+                    the training pipeline will discard the input row
+                    from training data.
+            """
+
+            column_name = proto.Field(proto.STRING, number=1)
+
+            invalid_values_allowed = proto.Field(proto.BOOL, number=2)
+
+        class CategoricalTransformation(proto.Message):
+            r"""Training pipeline will perform following transformation functions.
+
+            -  The categorical string as is--no change to case, punctuation,
+               spelling, tense, and so on.
+            -  Convert the category name to a dictionary lookup index and
+               generate an embedding for each index.
+            -  Categories that appear less than 5 times in the training dataset
+               are treated as the "unknown" category. The "unknown" category
+               gets its own special lookup index and resulting embedding.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(proto.STRING, number=1)
+
+        class TimestampTransformation(proto.Message):
+            r"""Training pipeline will perform following transformation functions.
+
+            -  Apply the transformation functions for Numerical columns.
+            -  Determine the year, month, day, and weekday. Treat each value from
+               the timestamp as a Categorical
+               column.
+            -  Invalid numerical values (for example, values that fall outside
+               of a typical timestamp range, or are extreme values) receive no
+               special treatment and are not removed.
+
+            Attributes:
+                column_name (str):
+
+                time_format (str):
+                    The format in which that time field is expressed. The
+                    time_format must either be one of:
+
+                    -  ``unix-seconds``
+                    -  ``unix-milliseconds``
+                    -  ``unix-microseconds``
+                    -  ``unix-nanoseconds`` (for respectively number of seconds,
+                       milliseconds, microseconds and nanoseconds since start of
+                       the Unix epoch); or be written in ``strftime`` syntax. If
+                       time_format is not set, then the default format is RFC
+                       3339 ``date-time`` format, where ``time-offset`` =
+                       ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
+                invalid_values_allowed (bool):
+                    If invalid values are allowed, the training
+                    pipeline will create a boolean feature that
+                    indicates whether the value is valid. Otherwise,
+                    the training pipeline will discard the input row
+                    from training data.
+            """
+
+            column_name = proto.Field(proto.STRING, number=1)
+
+            time_format = proto.Field(proto.STRING, number=2)
+
+            invalid_values_allowed = proto.Field(proto.BOOL, number=3)
+
+        class TextTransformation(proto.Message):
+            r"""Training pipeline will perform following transformation functions.
+
+            -  The text as is--no change to case, punctuation, spelling, tense,
+               and so on.
+            -  Tokenize text to words. Convert each word to a dictionary lookup
+               index and generate an embedding for each index. Combine the
+               embedding of all elements into a single embedding using the mean.
+            -  Tokenization is based on unicode script boundaries.
+            -  Missing values get their own lookup index and resulting
+               embedding.
+            -  Stop-words receive no special treatment and are not removed.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(proto.STRING, number=1)
+
+        class NumericArrayTransformation(proto.Message):
+            r"""Treats the column as numerical array and performs following
+            transformation functions.
+
+            -  All transformations for Numerical types applied to the average of
+               all the elements.
+            -  The average of empty arrays is treated as zero.
+
+            Attributes:
+                column_name (str):
+
+                invalid_values_allowed (bool):
+                    If invalid values are allowed, the training
+                    pipeline will create a boolean feature that
+                    indicates whether the value is valid. Otherwise,
+                    the training pipeline will discard the input row
+                    from training data.
+            """
+
+            column_name = proto.Field(proto.STRING, number=1)
+
+            invalid_values_allowed = proto.Field(proto.BOOL, number=2)
+
+        class CategoricalArrayTransformation(proto.Message):
+            r"""Treats the column as categorical array and performs following
+            transformation functions.
+
+            -  For each element in the array, convert the category name to a
+               dictionary lookup index and generate an embedding for each index.
+               Combine the embedding of all elements into a single embedding
+               using the mean.
+            -  Empty arrays treated as an embedding of zeroes.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(proto.STRING, number=1)
+
+        class TextArrayTransformation(proto.Message):
+            r"""Treats the column as text array and performs following
+            transformation functions.
+
+            -  Concatenate all text values in the array into a single text value
+               using a space (" ") as a delimiter, and then treat the result as
+               a single text value. Apply the transformations for Text columns.
+            -  Empty arrays treated as an empty text.
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.AutoTransformation", + ) + + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.NumericTransformation", + ) + + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.CategoricalTransformation", + ) + + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TimestampTransformation", + ) + + text = proto.Field( + proto.MESSAGE, + number=5, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TextTransformation", + ) + + repeated_numeric = proto.Field( + proto.MESSAGE, + number=6, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.NumericArrayTransformation", + ) + + repeated_categorical = proto.Field( + proto.MESSAGE, + number=7, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.CategoricalArrayTransformation", + ) + + repeated_text = proto.Field( + proto.MESSAGE, + number=8, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TextArrayTransformation", + ) + + class Period(proto.Message): + r"""A duration of time expressed in time granularity units. + + Attributes: + unit (str): + The time granularity unit of this time + period. The supported unit are: + "hour" + "day" + "week" + "month" + "year". + quantity (int): + The number of units per period, e.g. 3 weeks + or 2 months. 
+ """ + + unit = proto.Field(proto.STRING, number=1) + + quantity = proto.Field(proto.INT64, number=2) + + target_column = proto.Field(proto.STRING, number=1) + + time_series_identifier_column = proto.Field(proto.STRING, number=2) + + time_column = proto.Field(proto.STRING, number=3) + + transformations = proto.RepeatedField( + proto.MESSAGE, number=4, message=Transformation, + ) + + optimization_objective = proto.Field(proto.STRING, number=5) + + train_budget_milli_node_hours = proto.Field(proto.INT64, number=6) + + weight_column = proto.Field(proto.STRING, number=7) + + static_columns = proto.RepeatedField(proto.STRING, number=8) + + time_variant_past_only_columns = proto.RepeatedField(proto.STRING, number=9) + + time_variant_past_and_future_columns = proto.RepeatedField(proto.STRING, number=10) + + period = proto.Field(proto.MESSAGE, number=11, message=Period,) + + forecast_window_start = proto.Field(proto.INT64, number=12) + + forecast_window_end = proto.Field(proto.INT64, number=13) + + past_horizon = proto.Field(proto.INT64, number=14) + + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=15, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + + +class AutoMlForecastingMetadata(proto.Message): + r"""Model metadata specific to AutoML Forecasting. + + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. 
+ """ + + train_cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py new file mode 100644 index 0000000000..0ee0394192 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlImageClassification", + "AutoMlImageClassificationInputs", + "AutoMlImageClassificationMetadata", + }, +) + + +class AutoMlImageClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Classification Model. + + Attributes: + inputs (~.automl_image_classification.AutoMlImageClassificationInputs): + The input parameters of this TrainingJob. + metadata (~.automl_image_classification.AutoMlImageClassificationMetadata): + The metadata information. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlImageClassificationInputs", + ) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlImageClassificationMetadata", + ) + + +class AutoMlImageClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_image_classification.AutoMlImageClassificationInputs.ModelType): + + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 8,000 and + 800,000 milli node hours, inclusive. The default value is + 192,000 which represents one day in wall time, considering 8 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, + the training budget must be between 1,000 and 100,000 milli + node hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Classification might stop training + before the entire training budget has been used. 
+ multi_label (bool): + If false, a single-label (multi-class) Model + will be trained (i.e. assuming that for each + image just up to one annotation may be + applicable). If true, a multi-label Model will + be trained (i.e. assuming that for each image + multiple annotations may be applicable). + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_TF_LOW_LATENCY_1 = 2 + MOBILE_TF_VERSATILE_1 = 3 + MOBILE_TF_HIGH_ACCURACY_1 = 4 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + base_model_id = proto.Field(proto.STRING, number=2) + + budget_milli_node_hours = proto.Field(proto.INT64, number=3) + + disable_early_stopping = proto.Field(proto.BOOL, number=4) + + multi_label = proto.Field(proto.BOOL, number=5) + + +class AutoMlImageClassificationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (~.automl_image_classification.AutoMlImageClassificationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. 
+ """ + + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + successful_stop_reason = proto.Field( + proto.ENUM, number=2, enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py new file mode 100644 index 0000000000..3fb9d3ae1d --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlImageObjectDetection", + "AutoMlImageObjectDetectionInputs", + "AutoMlImageObjectDetectionMetadata", + }, +) + + +class AutoMlImageObjectDetection(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image Object + Detection Model. + + Attributes: + inputs (~.automl_image_object_detection.AutoMlImageObjectDetectionInputs): + The input parameters of this TrainingJob. 
+ metadata (~.automl_image_object_detection.AutoMlImageObjectDetectionMetadata): + The metadata information + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlImageObjectDetectionInputs", + ) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlImageObjectDetectionMetadata", + ) + + +class AutoMlImageObjectDetectionInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_image_object_detection.AutoMlImageObjectDetectionInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 20,000 and + 900,000 milli node hours, inclusive. The default value is + 216,000 which represents one day in wall time, considering 9 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the + training budget must be between 1,000 and 100,000 milli node + hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Object Detection might stop + training before the entire training budget has + been used. 
+ """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_LATENCY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + MOBILE_TF_VERSATILE_1 = 4 + MOBILE_TF_HIGH_ACCURACY_1 = 5 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + budget_milli_node_hours = proto.Field(proto.INT64, number=2) + + disable_early_stopping = proto.Field(proto.BOOL, number=3) + + +class AutoMlImageObjectDetectionMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (~.automl_image_object_detection.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + successful_stop_reason = proto.Field( + proto.ENUM, number=2, enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py new file mode 100644 index 0000000000..0fa3788b11 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={ + "AutoMlImageSegmentation", + "AutoMlImageSegmentationInputs", + "AutoMlImageSegmentationMetadata", + }, +) + + +class AutoMlImageSegmentation(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Segmentation Model. + + Attributes: + inputs (~.automl_image_segmentation.AutoMlImageSegmentationInputs): + The input parameters of this TrainingJob. + metadata (~.automl_image_segmentation.AutoMlImageSegmentationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlImageSegmentationInputs", + ) + + metadata = proto.Field( + proto.MESSAGE, number=2, message="AutoMlImageSegmentationMetadata", + ) + + +class AutoMlImageSegmentationInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_image_segmentation.AutoMlImageSegmentationInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. 
Or actaul_wall_clock_hours = + train_budget_milli_node_hours / (number_of_nodes_involved \* + 1000) For modelType ``cloud-high-accuracy-1``\ (default), + the budget must be between 20,000 and 2,000,000 milli node + hours, inclusive. The default value is 192,000 which + represents one day in wall time (1000 milli \* 24 hours \* 8 + nodes). + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_ACCURACY_1 = 2 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + budget_milli_node_hours = proto.Field(proto.INT64, number=2) + + base_model_id = proto.Field(proto.STRING, number=3) + + +class AutoMlImageSegmentationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (~.automl_image_segmentation.AutoMlImageSegmentationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. 
+ """ + + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + successful_stop_reason = proto.Field( + proto.ENUM, number=2, enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py new file mode 100644 index 0000000000..55d620b32e --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -0,0 +1,447 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import ( + export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config, +) + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTables", "AutoMlTablesInputs", "AutoMlTablesMetadata",}, +) + + +class AutoMlTables(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Tables Model. + + Attributes: + inputs (~.automl_tables.AutoMlTablesInputs): + The input parameters of this TrainingJob. 
+ metadata (~.automl_tables.AutoMlTablesMetadata): + The metadata information. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTablesInputs",) + + metadata = proto.Field(proto.MESSAGE, number=2, message="AutoMlTablesMetadata",) + + +class AutoMlTablesInputs(proto.Message): + r""" + + Attributes: + optimization_objective_recall_value (float): + Required when optimization_objective is + "maximize-precision-at-recall". Must be between 0 and 1, + inclusive. + optimization_objective_precision_value (float): + Required when optimization_objective is + "maximize-recall-at-precision". Must be between 0 and 1, + inclusive. + prediction_type (str): + The type of prediction the Model is to + produce. "classification" - Predict one out of + multiple target values is + picked for each row. + "regression" - Predict a value based on its + relation to other values. This + type is available only to columns that contain + semantically numeric values, i.e. integers or + floating point number, even if + stored as e.g. strings. + target_column (str): + The column name of the target column that the + model is to predict. + transformations (Sequence[~.automl_tables.AutoMlTablesInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that maximizes/minimizes the value of the + objective function over the validation set. + + The supported optimization objectives depend on + the prediction type. If the field is not set, a + default objective function is used. + classification (binary): + "maximize-au-roc" (default) - Maximize the + area under the receiver + operating characteristic (ROC) curve. + "minimize-log-loss" - Minimize log loss. 
+ "maximize-au-prc" - Maximize the area under + the precision-recall curve. "maximize- + precision-at-recall" - Maximize precision for a + specified + recall value. "maximize-recall-at-precision" - + Maximize recall for a specified + precision value. + classification (multi-class): + "minimize-log-loss" (default) - Minimize log + loss. + regression: + "minimize-rmse" (default) - Minimize root- + mean-squared error (RMSE). "minimize-mae" - + Minimize mean-absolute error (MAE). "minimize- + rmsle" - Minimize root-mean-squared log error + (RMSLE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. By default, the + early stopping feature is enabled, which means + that AutoML Tables might stop training before + the entire training budget has been used. + weight_column_name (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. 
+ export_evaluated_data_items_config (~.gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + """ + + class Transformation(proto.Message): + r""" + + Attributes: + auto (~.automl_tables.AutoMlTablesInputs.Transformation.AutoTransformation): + + numeric (~.automl_tables.AutoMlTablesInputs.Transformation.NumericTransformation): + + categorical (~.automl_tables.AutoMlTablesInputs.Transformation.CategoricalTransformation): + + timestamp (~.automl_tables.AutoMlTablesInputs.Transformation.TimestampTransformation): + + text (~.automl_tables.AutoMlTablesInputs.Transformation.TextTransformation): + + repeated_numeric (~.automl_tables.AutoMlTablesInputs.Transformation.NumericArrayTransformation): + + repeated_categorical (~.automl_tables.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation): + + repeated_text (~.automl_tables.AutoMlTablesInputs.Transformation.TextArrayTransformation): + + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistic of dataset. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + - A boolean value that indicates whether the value is valid. 
+ + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field(proto.STRING, number=1) + + invalid_values_allowed = proto.Field(proto.BOOL, number=2) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed. The + time_format must either be one of: + + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. 
If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = + ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field(proto.STRING, number=1) + + time_format = proto.Field(proto.STRING, number=2) + + invalid_values_allowed = proto.Field(proto.BOOL, number=3) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting + embedding. + - Stop-words receive no special treatment and are not removed. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class NumericArrayTransformation(proto.Message): + r"""Treats the column as numerical array and performs following + transformation functions. + + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. 
+ """ + + column_name = proto.Field(proto.STRING, number=1) + + invalid_values_allowed = proto.Field(proto.BOOL, number=2) + + class CategoricalArrayTransformation(proto.Message): + r"""Treats the column as categorical array and performs following + transformation functions. + + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + class TextArrayTransformation(proto.Message): + r"""Treats the column as text array and performs following + transformation functions. + + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as + a single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field(proto.STRING, number=1) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.AutoTransformation", + ) + + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.NumericTransformation", + ) + + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.CategoricalTransformation", + ) + + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.TimestampTransformation", + ) + + text = proto.Field( + proto.MESSAGE, + number=5, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.TextTransformation", + ) + + repeated_numeric = proto.Field( + proto.MESSAGE, + number=6, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.NumericArrayTransformation", + ) + + repeated_categorical = proto.Field( + proto.MESSAGE, + number=7, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.CategoricalArrayTransformation", + ) + + repeated_text = proto.Field( + proto.MESSAGE, + number=8, + oneof="transformation_detail", + message="AutoMlTablesInputs.Transformation.TextArrayTransformation", + ) + + optimization_objective_recall_value = proto.Field( + proto.FLOAT, number=5, oneof="additional_optimization_objective_config" + ) + + optimization_objective_precision_value = proto.Field( + proto.FLOAT, number=6, oneof="additional_optimization_objective_config" + ) + + prediction_type = proto.Field(proto.STRING, number=1) + + target_column = proto.Field(proto.STRING, number=2) + + transformations = proto.RepeatedField( + proto.MESSAGE, number=3, message=Transformation, + ) + + optimization_objective = proto.Field(proto.STRING, number=4) + + 
train_budget_milli_node_hours = proto.Field(proto.INT64, number=7) + + disable_early_stopping = proto.Field(proto.BOOL, number=8) + + weight_column_name = proto.Field(proto.STRING, number=9) + + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=10, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + + +class AutoMlTablesMetadata(proto.Message): + r"""Model metadata specific to AutoML Tables. + + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. + """ + + train_cost_milli_node_hours = proto.Field(proto.INT64, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py new file mode 100644 index 0000000000..ca75734600 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTextClassification", "AutoMlTextClassificationInputs",}, +) + + +class AutoMlTextClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Classification Model. + + Attributes: + inputs (~.automl_text_classification.AutoMlTextClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlTextClassificationInputs", + ) + + +class AutoMlTextClassificationInputs(proto.Message): + r""" + + Attributes: + multi_label (bool): + + """ + + multi_label = proto.Field(proto.BOOL, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py new file mode 100644 index 0000000000..336509af22 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTextExtraction", "AutoMlTextExtractionInputs",}, +) + + +class AutoMlTextExtraction(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Extraction Model. + + Attributes: + inputs (~.automl_text_extraction.AutoMlTextExtractionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTextExtractionInputs",) + + +class AutoMlTextExtractionInputs(proto.Message): + r"""""" + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py new file mode 100644 index 0000000000..d5de97e2b2 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlTextSentiment", "AutoMlTextSentimentInputs",}, +) + + +class AutoMlTextSentiment(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Sentiment Model. + + Attributes: + inputs (~.automl_text_sentiment.AutoMlTextSentimentInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTextSentimentInputs",) + + +class AutoMlTextSentimentInputs(proto.Message): + r""" + + Attributes: + sentiment_max (int): + A sentiment is expressed as an integer + ordinal, where higher value means a more + positive sentiment. The range of sentiments that + will be used is between 0 and sentimentMax + (inclusive on both ends), and all the values in + the range must be represented in the dataset + before a model can be created. + Only the Annotations with this sentimentMax will + be used for training. sentimentMax value must be + between 1 and 10 (inclusive). + """ + + sentiment_max = proto.Field(proto.INT32, number=1) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py new file mode 100644 index 0000000000..d6969d93c6 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlVideoActionRecognition", "AutoMlVideoActionRecognitionInputs",}, +) + + +class AutoMlVideoActionRecognition(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video Action + Recognition Model. + + Attributes: + inputs (~.automl_video_action_recognition.AutoMlVideoActionRecognitionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlVideoActionRecognitionInputs", + ) + + +class AutoMlVideoActionRecognitionInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_video_action_recognition.AutoMlVideoActionRecognitionInputs.ModelType): + + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py new file mode 100644 index 0000000000..3164544d47 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlVideoClassification", "AutoMlVideoClassificationInputs",}, +) + + +class AutoMlVideoClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + Classification Model. + + Attributes: + inputs (~.automl_video_classification.AutoMlVideoClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlVideoClassificationInputs", + ) + + +class AutoMlVideoClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_video_classification.AutoMlVideoClassificationInputs.ModelType): + + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py new file mode 100644 index 0000000000..0fd8c7ec7a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"AutoMlVideoObjectTracking", "AutoMlVideoObjectTrackingInputs",}, +) + + +class AutoMlVideoObjectTracking(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + ObjectTracking Model. + + Attributes: + inputs (~.automl_video_object_tracking.AutoMlVideoObjectTrackingInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, number=1, message="AutoMlVideoObjectTrackingInputs", + ) + + +class AutoMlVideoObjectTrackingInputs(proto.Message): + r""" + + Attributes: + model_type (~.automl_video_object_tracking.AutoMlVideoObjectTrackingInputs.ModelType): + + """ + + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_CORAL_VERSATILE_1 = 3 + MOBILE_CORAL_LOW_LATENCY_1 = 4 + MOBILE_JETSON_VERSATILE_1 = 5 + MOBILE_JETSON_LOW_LATENCY_1 = 6 + + model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py new file mode 100644 index 0000000000..29bc547adf --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + manifest={"ExportEvaluatedDataItemsConfig",}, +) + + +class ExportEvaluatedDataItemsConfig(proto.Message): + r"""Configuration for exporting test set predictions to a + BigQuery table. + + Attributes: + destination_bigquery_uri (str): + URI of desired destination BigQuery table. If not specified, + then results are exported to the following auto-created + BigQuery table: + + :export_evaluated_examples__.evaluated_examples + override_existing_table (bool): + If true and an export destination is + specified, then the contents of the destination + will be overwritten. Otherwise, if the export + destination already exists, then the export + operation will not trigger and a failure + response is returned. + """ + + destination_bigquery_uri = proto.Field(proto.STRING, number=1) + + override_existing_table = proto.Field(proto.BOOL, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index f49f90f5eb..5f466b2e9b 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -229,6 +229,7 @@ "DataItem", "DataLabelingJob", "Dataset", + "DatasetServiceClient", "DedicatedResources", "DeleteBatchPredictionJobRequest", "DeleteCustomJobRequest", @@ -345,7 +346,6 @@ "SearchMigratableResourcesResponse", "SmoothGradConfig", "SpecialistPool", - "SpecialistPoolServiceClient", "StudySpec", "TimestampSplit", "TrainingConfig", @@ -365,5 +365,5 @@ "UserActionReference", "WorkerPoolSpec", "XraiAttribution", - "DatasetServiceClient", + "SpecialistPoolServiceClient", ) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index 775558e3b1..1927709f30 100644 --- 
a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -228,7 +228,7 @@ async def create_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -308,7 +308,7 @@ async def get_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -396,7 +396,7 @@ async def update_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -473,7 +473,7 @@ async def list_datasets( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_datasets, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -567,7 +567,7 @@ async def delete_dataset( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_dataset, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -662,7 +662,7 @@ async def import_data( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.import_data, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -755,7 +755,7 @@ async def export_data( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.export_data, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -839,7 +839,7 @@ async def list_data_items( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_data_items, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -919,7 +919,7 @@ async def get_annotation_spec( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_annotation_spec, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -996,7 +996,7 @@ async def list_annotations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_annotations, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index 8cceeb197c..56f567959a 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -112,34 +112,34 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, default_timeout=None, client_info=client_info, + self.create_dataset, default_timeout=5.0, client_info=client_info, ), self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, default_timeout=None, client_info=client_info, + self.get_dataset, default_timeout=5.0, client_info=client_info, ), self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, default_timeout=None, client_info=client_info, + self.update_dataset, default_timeout=5.0, client_info=client_info, ), self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, default_timeout=None, client_info=client_info, + self.list_datasets, default_timeout=5.0, client_info=client_info, ), self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, default_timeout=None, client_info=client_info, + self.delete_dataset, default_timeout=5.0, client_info=client_info, ), self.import_data: gapic_v1.method.wrap_method( - self.import_data, default_timeout=None, client_info=client_info, + self.import_data, default_timeout=5.0, client_info=client_info, ), self.export_data: gapic_v1.method.wrap_method( - self.export_data, default_timeout=None, client_info=client_info, + self.export_data, default_timeout=5.0, client_info=client_info, ), self.list_data_items: gapic_v1.method.wrap_method( - self.list_data_items, default_timeout=None, client_info=client_info, + self.list_data_items, default_timeout=5.0, client_info=client_info, ), self.get_annotation_spec: gapic_v1.method.wrap_method( - self.get_annotation_spec, default_timeout=None, client_info=client_info, + self.get_annotation_spec, default_timeout=5.0, client_info=client_info, ), self.list_annotations: gapic_v1.method.wrap_method( - self.list_annotations, default_timeout=None, client_info=client_info, + self.list_annotations, default_timeout=5.0, client_info=client_info, ), } diff --git 
a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 9056e7a149..9c6af3bd16 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -219,7 +219,7 @@ async def create_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -300,7 +300,7 @@ async def get_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -376,7 +376,7 @@ async def list_endpoints( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_endpoints, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -464,7 +464,7 @@ async def update_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -554,7 +554,7 @@ async def delete_endpoint( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_endpoint, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -678,7 +678,7 @@ async def deploy_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.deploy_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -793,7 +793,7 @@ async def undeploy_model( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.undeploy_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py index 63965464b7..e55589de8f 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -111,25 +111,25 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_endpoint: gapic_v1.method.wrap_method( - self.create_endpoint, default_timeout=None, client_info=client_info, + self.create_endpoint, default_timeout=5.0, client_info=client_info, ), self.get_endpoint: gapic_v1.method.wrap_method( - self.get_endpoint, default_timeout=None, client_info=client_info, + self.get_endpoint, default_timeout=5.0, client_info=client_info, ), self.list_endpoints: gapic_v1.method.wrap_method( - self.list_endpoints, default_timeout=None, client_info=client_info, + self.list_endpoints, default_timeout=5.0, client_info=client_info, ), self.update_endpoint: gapic_v1.method.wrap_method( - self.update_endpoint, default_timeout=None, client_info=client_info, + self.update_endpoint, default_timeout=5.0, client_info=client_info, ), self.delete_endpoint: gapic_v1.method.wrap_method( - self.delete_endpoint, default_timeout=None, client_info=client_info, + self.delete_endpoint, default_timeout=5.0, client_info=client_info, ), self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, default_timeout=None, client_info=client_info, + self.deploy_model, default_timeout=5.0, client_info=client_info, ), self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, default_timeout=None, client_info=client_info, + self.undeploy_model, default_timeout=5.0, 
client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index d988c81d3c..2a24748d11 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -252,7 +252,7 @@ async def create_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -330,7 +330,7 @@ async def get_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -406,7 +406,7 @@ async def list_custom_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_custom_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -500,7 +500,7 @@ async def delete_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -586,7 +586,7 @@ async def cancel_custom_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_custom_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -667,7 +667,7 @@ async def create_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -741,7 +741,7 @@ async def get_data_labeling_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -816,7 +816,7 @@ async def list_data_labeling_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_data_labeling_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -911,7 +911,7 @@ async def delete_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -987,7 +987,7 @@ async def cancel_data_labeling_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1070,7 +1070,7 @@ async def create_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1146,7 +1146,7 @@ async def get_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1222,7 +1222,7 @@ async def list_hyperparameter_tuning_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_hyperparameter_tuning_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1317,7 +1317,7 @@ async def delete_hyperparameter_tuning_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1406,7 +1406,7 @@ async def cancel_hyperparameter_tuning_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1493,7 +1493,7 @@ async def create_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1572,7 +1572,7 @@ async def get_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1648,7 +1648,7 @@ async def list_batch_prediction_jobs( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_batch_prediction_jobs, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1744,7 +1744,7 @@ async def delete_batch_prediction_job( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1831,7 +1831,7 @@ async def cancel_batch_prediction_job( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index 04c05890bc..3d1f0be59b 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -124,93 +124,93 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.create_custom_job: gapic_v1.method.wrap_method( - self.create_custom_job, default_timeout=None, client_info=client_info, + self.create_custom_job, default_timeout=5.0, client_info=client_info, ), self.get_custom_job: gapic_v1.method.wrap_method( - self.get_custom_job, default_timeout=None, client_info=client_info, + self.get_custom_job, default_timeout=5.0, client_info=client_info, ), self.list_custom_jobs: gapic_v1.method.wrap_method( - self.list_custom_jobs, default_timeout=None, client_info=client_info, + self.list_custom_jobs, default_timeout=5.0, client_info=client_info, ), self.delete_custom_job: gapic_v1.method.wrap_method( - self.delete_custom_job, default_timeout=None, client_info=client_info, + self.delete_custom_job, default_timeout=5.0, client_info=client_info, ), self.cancel_custom_job: gapic_v1.method.wrap_method( - self.cancel_custom_job, default_timeout=None, client_info=client_info, + self.cancel_custom_job, default_timeout=5.0, client_info=client_info, ), self.create_data_labeling_job: gapic_v1.method.wrap_method( self.create_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_data_labeling_job: gapic_v1.method.wrap_method( self.get_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), 
self.list_data_labeling_jobs: gapic_v1.method.wrap_method( self.list_data_labeling_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_data_labeling_job: gapic_v1.method.wrap_method( self.delete_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_data_labeling_job: gapic_v1.method.wrap_method( self.cancel_data_labeling_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.create_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.get_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( self.list_hyperparameter_tuning_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.delete_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( self.cancel_hyperparameter_tuning_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.create_batch_prediction_job: gapic_v1.method.wrap_method( self.create_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_batch_prediction_job: gapic_v1.method.wrap_method( self.get_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( self.list_batch_prediction_jobs, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_batch_prediction_job: gapic_v1.method.wrap_method( 
self.delete_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( self.cancel_batch_prediction_job, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index 81c1f9cb51..3b27b6e184 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -235,7 +235,7 @@ async def upload_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.upload_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -313,7 +313,7 @@ async def get_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -389,7 +389,7 @@ async def list_models( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_models, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -476,7 +476,7 @@ async def update_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -568,7 +568,7 @@ async def delete_model( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -666,7 +666,7 @@ async def export_model( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.export_model, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -750,7 +750,7 @@ async def get_model_evaluation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model_evaluation, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -826,7 +826,7 @@ async def list_model_evaluations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_model_evaluations, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -908,7 +908,7 @@ async def get_model_evaluation_slice( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_model_evaluation_slice, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -985,7 +985,7 @@ async def list_model_evaluation_slices( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_model_evaluation_slices, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index 681d035178..2f87fc98dd 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -113,41 +113,39 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.upload_model: gapic_v1.method.wrap_method( - self.upload_model, default_timeout=None, client_info=client_info, + self.upload_model, default_timeout=5.0, client_info=client_info, ), self.get_model: gapic_v1.method.wrap_method( - self.get_model, default_timeout=None, client_info=client_info, + self.get_model, default_timeout=5.0, client_info=client_info, ), self.list_models: gapic_v1.method.wrap_method( - self.list_models, default_timeout=None, client_info=client_info, + self.list_models, default_timeout=5.0, client_info=client_info, ), self.update_model: gapic_v1.method.wrap_method( - self.update_model, default_timeout=None, client_info=client_info, + self.update_model, default_timeout=5.0, client_info=client_info, ), self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, default_timeout=None, client_info=client_info, + self.delete_model, default_timeout=5.0, client_info=client_info, ), self.export_model: gapic_v1.method.wrap_method( - self.export_model, default_timeout=None, client_info=client_info, + self.export_model, default_timeout=5.0, client_info=client_info, ), self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_timeout=None, - client_info=client_info, + self.get_model_evaluation, default_timeout=5.0, client_info=client_info, ), self.list_model_evaluations: gapic_v1.method.wrap_method( self.list_model_evaluations, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_model_evaluation_slice: gapic_v1.method.wrap_method( self.get_model_evaluation_slice, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_model_evaluation_slices: gapic_v1.method.wrap_method( self.list_model_evaluation_slices, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py 
b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index d361b05e21..ef420aae0b 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -230,7 +230,7 @@ async def create_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -308,7 +308,7 @@ async def get_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -384,7 +384,7 @@ async def list_training_pipelines( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_training_pipelines, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -479,7 +479,7 @@ async def delete_training_pipeline( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -567,7 +567,7 @@ async def cancel_training_pipeline( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index 1b235635f1..41123b8615 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -115,27 +115,27 @@ def _prep_wrapped_messages(self, client_info): self._wrapped_methods = { self.create_training_pipeline: gapic_v1.method.wrap_method( self.create_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_training_pipeline: gapic_v1.method.wrap_method( self.get_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.list_training_pipelines: gapic_v1.method.wrap_method( self.list_training_pipelines, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_training_pipeline: gapic_v1.method.wrap_method( self.delete_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.cancel_training_pipeline: gapic_v1.method.wrap_method( self.cancel_training_pipeline, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index c82146bafa..bb58b0bfac 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -230,7 +230,7 @@ async def predict( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.predict, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -356,7 +356,7 @@ async def explain( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.explain, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index cdec1c11e5..0c82f7d83c 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -107,10 +107,10 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.predict: gapic_v1.method.wrap_method( - self.predict, default_timeout=None, client_info=client_info, + self.predict, default_timeout=5.0, client_info=client_info, ), self.explain: gapic_v1.method.wrap_method( - self.explain, default_timeout=None, client_info=client_info, + self.explain, default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 77f40bd4ad..c693126d4c 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -236,7 +236,7 @@ async def create_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -327,7 +327,7 @@ async def get_specialist_pool( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -403,7 +403,7 @@ async def list_specialist_pools( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_specialist_pools, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -498,7 +498,7 @@ async def delete_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -597,7 +597,7 @@ async def update_specialist_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py index 30fbd3030f..f1af058030 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -111,25 +111,25 @@ def _prep_wrapped_messages(self, client_info): self._wrapped_methods = { self.create_specialist_pool: gapic_v1.method.wrap_method( self.create_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.get_specialist_pool: gapic_v1.method.wrap_method( - self.get_specialist_pool, default_timeout=None, client_info=client_info, + self.get_specialist_pool, default_timeout=5.0, client_info=client_info, ), self.list_specialist_pools: gapic_v1.method.wrap_method( self.list_specialist_pools, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.delete_specialist_pool: 
gapic_v1.method.wrap_method( self.delete_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), self.update_specialist_pool: gapic_v1.method.wrap_method( self.update_specialist_pool, - default_timeout=None, + default_timeout=5.0, client_info=client_info, ), } diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 97e5625d20..c668a7be98 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -15,7 +15,23 @@ # limitations under the License. # +from .user_action_reference import UserActionReference +from .annotation import Annotation from .annotation_spec import AnnotationSpec +from .completion_stats import CompletionStats +from .explanation_metadata import ExplanationMetadata +from .explanation import ( + Explanation, + ModelExplanation, + Attribution, + ExplanationSpec, + ExplanationParameters, + SampledShapleyAttribution, + IntegratedGradientsAttribution, + XraiAttribution, + SmoothGradConfig, + FeatureNoiseSigma, +) from .io import ( GcsSource, GcsDestination, @@ -23,14 +39,6 @@ BigQueryDestination, ContainerRegistryDestination, ) -from .dataset import ( - Dataset, - ImportDataConfig, - ExportDataConfig, -) -from .manual_batch_tuning_parameters import ManualBatchTuningParameters -from .completion_stats import CompletionStats -from .model_evaluation_slice import ModelEvaluationSlice from .machine_resources import ( MachineSpec, DedicatedResources, @@ -39,21 +47,35 @@ ResourcesConsumed, DiskSpec, ) -from .deployed_model_ref import DeployedModelRef +from .manual_batch_tuning_parameters import ManualBatchTuningParameters +from .batch_prediction_job import BatchPredictionJob from .env_var import EnvVar -from .explanation_metadata import ExplanationMetadata -from .explanation import ( - Explanation, - ModelExplanation, - Attribution, - ExplanationSpec, - ExplanationParameters, - 
SampledShapleyAttribution, - IntegratedGradientsAttribution, - XraiAttribution, - SmoothGradConfig, - FeatureNoiseSigma, +from .custom_job import ( + CustomJob, + CustomJobSpec, + WorkerPoolSpec, + ContainerSpec, + PythonPackageSpec, + Scheduling, ) +from .data_item import DataItem +from .specialist_pool import SpecialistPool +from .data_labeling_job import ( + DataLabelingJob, + ActiveLearningConfig, + SampleConfig, + TrainingConfig, +) +from .dataset import ( + Dataset, + ImportDataConfig, + ExportDataConfig, +) +from .operation import ( + GenericOperationMetadata, + DeleteOperationMetadata, +) +from .deployed_model_ref import DeployedModelRef from .model import ( Model, PredictSchemata, @@ -68,36 +90,44 @@ PredefinedSplit, TimestampSplit, ) -from .model_evaluation import ModelEvaluation -from .migratable_resource import MigratableResource -from .operation import ( - GenericOperationMetadata, - DeleteOperationMetadata, -) -from .migration_service import ( - SearchMigratableResourcesRequest, - SearchMigratableResourcesResponse, - BatchMigrateResourcesRequest, - MigrateResourceRequest, - BatchMigrateResourcesResponse, - MigrateResourceResponse, - BatchMigrateResourcesOperationMetadata, +from .dataset_service import ( + CreateDatasetRequest, + CreateDatasetOperationMetadata, + GetDatasetRequest, + UpdateDatasetRequest, + ListDatasetsRequest, + ListDatasetsResponse, + DeleteDatasetRequest, + ImportDataRequest, + ImportDataResponse, + ImportDataOperationMetadata, + ExportDataRequest, + ExportDataResponse, + ExportDataOperationMetadata, + ListDataItemsRequest, + ListDataItemsResponse, + GetAnnotationSpecRequest, + ListAnnotationsRequest, + ListAnnotationsResponse, ) -from .batch_prediction_job import BatchPredictionJob -from .custom_job import ( - CustomJob, - CustomJobSpec, - WorkerPoolSpec, - ContainerSpec, - PythonPackageSpec, - Scheduling, +from .endpoint import ( + Endpoint, + DeployedModel, ) -from .specialist_pool import SpecialistPool -from .data_labeling_job 
import ( - DataLabelingJob, - ActiveLearningConfig, - SampleConfig, - TrainingConfig, +from .endpoint_service import ( + CreateEndpointRequest, + CreateEndpointOperationMetadata, + GetEndpointRequest, + ListEndpointsRequest, + ListEndpointsResponse, + UpdateEndpointRequest, + DeleteEndpointRequest, + DeployModelRequest, + DeployModelResponse, + DeployModelOperationMetadata, + UndeployModelRequest, + UndeployModelResponse, + UndeployModelOperationMetadata, ) from .study import ( Trial, @@ -131,51 +161,18 @@ DeleteBatchPredictionJobRequest, CancelBatchPredictionJobRequest, ) -from .user_action_reference import UserActionReference -from .annotation import Annotation -from .endpoint import ( - Endpoint, - DeployedModel, -) -from .prediction_service import ( - PredictRequest, - PredictResponse, - ExplainRequest, - ExplainResponse, -) -from .endpoint_service import ( - CreateEndpointRequest, - CreateEndpointOperationMetadata, - GetEndpointRequest, - ListEndpointsRequest, - ListEndpointsResponse, - UpdateEndpointRequest, - DeleteEndpointRequest, - DeployModelRequest, - DeployModelResponse, - DeployModelOperationMetadata, - UndeployModelRequest, - UndeployModelResponse, - UndeployModelOperationMetadata, -) -from .pipeline_service import ( - CreateTrainingPipelineRequest, - GetTrainingPipelineRequest, - ListTrainingPipelinesRequest, - ListTrainingPipelinesResponse, - DeleteTrainingPipelineRequest, - CancelTrainingPipelineRequest, -) -from .specialist_pool_service import ( - CreateSpecialistPoolRequest, - CreateSpecialistPoolOperationMetadata, - GetSpecialistPoolRequest, - ListSpecialistPoolsRequest, - ListSpecialistPoolsResponse, - DeleteSpecialistPoolRequest, - UpdateSpecialistPoolRequest, - UpdateSpecialistPoolOperationMetadata, +from .migratable_resource import MigratableResource +from .migration_service import ( + SearchMigratableResourcesRequest, + SearchMigratableResourcesResponse, + BatchMigrateResourcesRequest, + MigrateResourceRequest, + 
BatchMigrateResourcesResponse, + MigrateResourceResponse, + BatchMigrateResourcesOperationMetadata, ) +from .model_evaluation import ModelEvaluation +from .model_evaluation_slice import ModelEvaluationSlice from .model_service import ( UploadModelRequest, UploadModelOperationMetadata, @@ -195,50 +192,37 @@ ListModelEvaluationSlicesRequest, ListModelEvaluationSlicesResponse, ) -from .data_item import DataItem -from .dataset_service import ( - CreateDatasetRequest, - CreateDatasetOperationMetadata, - GetDatasetRequest, - UpdateDatasetRequest, - ListDatasetsRequest, - ListDatasetsResponse, - DeleteDatasetRequest, - ImportDataRequest, - ImportDataResponse, - ImportDataOperationMetadata, - ExportDataRequest, - ExportDataResponse, - ExportDataOperationMetadata, - ListDataItemsRequest, - ListDataItemsResponse, - GetAnnotationSpecRequest, - ListAnnotationsRequest, - ListAnnotationsResponse, +from .pipeline_service import ( + CreateTrainingPipelineRequest, + GetTrainingPipelineRequest, + ListTrainingPipelinesRequest, + ListTrainingPipelinesResponse, + DeleteTrainingPipelineRequest, + CancelTrainingPipelineRequest, +) +from .prediction_service import ( + PredictRequest, + PredictResponse, + ExplainRequest, + ExplainResponse, +) +from .specialist_pool_service import ( + CreateSpecialistPoolRequest, + CreateSpecialistPoolOperationMetadata, + GetSpecialistPoolRequest, + ListSpecialistPoolsRequest, + ListSpecialistPoolsResponse, + DeleteSpecialistPoolRequest, + UpdateSpecialistPoolRequest, + UpdateSpecialistPoolOperationMetadata, ) __all__ = ( + "UserActionReference", + "Annotation", "AnnotationSpec", - "GcsSource", - "GcsDestination", - "BigQuerySource", - "BigQueryDestination", - "ContainerRegistryDestination", - "Dataset", - "ImportDataConfig", - "ExportDataConfig", - "ManualBatchTuningParameters", "CompletionStats", - "ModelEvaluationSlice", - "MachineSpec", - "DedicatedResources", - "AutomaticResources", - "BatchDedicatedResources", - "ResourcesConsumed", - "DiskSpec", - 
"DeployedModelRef", - "EnvVar", "ExplanationMetadata", "Explanation", "ModelExplanation", @@ -250,39 +234,81 @@ "XraiAttribution", "SmoothGradConfig", "FeatureNoiseSigma", - "Model", - "PredictSchemata", - "ModelContainerSpec", - "Port", - "TrainingPipeline", - "InputDataConfig", - "FractionSplit", - "FilterSplit", - "PredefinedSplit", - "TimestampSplit", - "ModelEvaluation", - "MigratableResource", - "GenericOperationMetadata", - "DeleteOperationMetadata", - "SearchMigratableResourcesRequest", - "SearchMigratableResourcesResponse", - "BatchMigrateResourcesRequest", - "MigrateResourceRequest", - "BatchMigrateResourcesResponse", - "MigrateResourceResponse", - "BatchMigrateResourcesOperationMetadata", + "GcsSource", + "GcsDestination", + "BigQuerySource", + "BigQueryDestination", + "ContainerRegistryDestination", + "MachineSpec", + "DedicatedResources", + "AutomaticResources", + "BatchDedicatedResources", + "ResourcesConsumed", + "DiskSpec", + "ManualBatchTuningParameters", "BatchPredictionJob", + "EnvVar", "CustomJob", "CustomJobSpec", "WorkerPoolSpec", "ContainerSpec", "PythonPackageSpec", "Scheduling", + "DataItem", "SpecialistPool", "DataLabelingJob", "ActiveLearningConfig", "SampleConfig", "TrainingConfig", + "Dataset", + "ImportDataConfig", + "ExportDataConfig", + "GenericOperationMetadata", + "DeleteOperationMetadata", + "DeployedModelRef", + "Model", + "PredictSchemata", + "ModelContainerSpec", + "Port", + "TrainingPipeline", + "InputDataConfig", + "FractionSplit", + "FilterSplit", + "PredefinedSplit", + "TimestampSplit", + "CreateDatasetRequest", + "CreateDatasetOperationMetadata", + "GetDatasetRequest", + "UpdateDatasetRequest", + "ListDatasetsRequest", + "ListDatasetsResponse", + "DeleteDatasetRequest", + "ImportDataRequest", + "ImportDataResponse", + "ImportDataOperationMetadata", + "ExportDataRequest", + "ExportDataResponse", + "ExportDataOperationMetadata", + "ListDataItemsRequest", + "ListDataItemsResponse", + "GetAnnotationSpecRequest", + 
"ListAnnotationsRequest", + "ListAnnotationsResponse", + "Endpoint", + "DeployedModel", + "CreateEndpointRequest", + "CreateEndpointOperationMetadata", + "GetEndpointRequest", + "ListEndpointsRequest", + "ListEndpointsResponse", + "UpdateEndpointRequest", + "DeleteEndpointRequest", + "DeployModelRequest", + "DeployModelResponse", + "DeployModelOperationMetadata", + "UndeployModelRequest", + "UndeployModelResponse", + "UndeployModelOperationMetadata", "Trial", "StudySpec", "Measurement", @@ -311,41 +337,16 @@ "ListBatchPredictionJobsResponse", "DeleteBatchPredictionJobRequest", "CancelBatchPredictionJobRequest", - "UserActionReference", - "Annotation", - "Endpoint", - "DeployedModel", - "PredictRequest", - "PredictResponse", - "ExplainRequest", - "ExplainResponse", - "CreateEndpointRequest", - "CreateEndpointOperationMetadata", - "GetEndpointRequest", - "ListEndpointsRequest", - "ListEndpointsResponse", - "UpdateEndpointRequest", - "DeleteEndpointRequest", - "DeployModelRequest", - "DeployModelResponse", - "DeployModelOperationMetadata", - "UndeployModelRequest", - "UndeployModelResponse", - "UndeployModelOperationMetadata", - "CreateTrainingPipelineRequest", - "GetTrainingPipelineRequest", - "ListTrainingPipelinesRequest", - "ListTrainingPipelinesResponse", - "DeleteTrainingPipelineRequest", - "CancelTrainingPipelineRequest", - "CreateSpecialistPoolRequest", - "CreateSpecialistPoolOperationMetadata", - "GetSpecialistPoolRequest", - "ListSpecialistPoolsRequest", - "ListSpecialistPoolsResponse", - "DeleteSpecialistPoolRequest", - "UpdateSpecialistPoolRequest", - "UpdateSpecialistPoolOperationMetadata", + "MigratableResource", + "SearchMigratableResourcesRequest", + "SearchMigratableResourcesResponse", + "BatchMigrateResourcesRequest", + "MigrateResourceRequest", + "BatchMigrateResourcesResponse", + "MigrateResourceResponse", + "BatchMigrateResourcesOperationMetadata", + "ModelEvaluation", + "ModelEvaluationSlice", "UploadModelRequest", "UploadModelOperationMetadata", 
"UploadModelResponse", @@ -363,23 +364,22 @@ "GetModelEvaluationSliceRequest", "ListModelEvaluationSlicesRequest", "ListModelEvaluationSlicesResponse", - "DataItem", - "CreateDatasetRequest", - "CreateDatasetOperationMetadata", - "GetDatasetRequest", - "UpdateDatasetRequest", - "ListDatasetsRequest", - "ListDatasetsResponse", - "DeleteDatasetRequest", - "ImportDataRequest", - "ImportDataResponse", - "ImportDataOperationMetadata", - "ExportDataRequest", - "ExportDataResponse", - "ExportDataOperationMetadata", - "ListDataItemsRequest", - "ListDataItemsResponse", - "GetAnnotationSpecRequest", - "ListAnnotationsRequest", - "ListAnnotationsResponse", + "CreateTrainingPipelineRequest", + "GetTrainingPipelineRequest", + "ListTrainingPipelinesRequest", + "ListTrainingPipelinesResponse", + "DeleteTrainingPipelineRequest", + "CancelTrainingPipelineRequest", + "PredictRequest", + "PredictResponse", + "ExplainRequest", + "ExplainResponse", + "CreateSpecialistPoolRequest", + "CreateSpecialistPoolOperationMetadata", + "GetSpecialistPoolRequest", + "ListSpecialistPoolsRequest", + "ListSpecialistPoolsResponse", + "DeleteSpecialistPoolRequest", + "UpdateSpecialistPoolRequest", + "UpdateSpecialistPoolOperationMetadata", ) diff --git a/noxfile.py b/noxfile.py index 1797beebfd..87765339b5 100644 --- a/noxfile.py +++ b/noxfile.py @@ -28,7 +28,7 @@ DEFAULT_PYTHON_VERSION = "3.8" SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -81,9 +81,8 @@ def default(session): session.run( "py.test", "--quiet", - "--cov=google.cloud.aiplatform", - "--cov=google.cloud", - "--cov=tests.unit", + "--cov=google/cloud", + "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", diff --git a/synth.metadata b/synth.metadata index 9399d8c2e3..b39f24bbb9 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,15 +3,22 @@ { "git": { "name": ".", 
- "remote": "https://github.com/dizcology/python-aiplatform.git", - "sha": "81da030c0af8902fd54c8e7b5e92255a532d0efb" + "remote": "https://github.com/googleapis/python-aiplatform.git", + "sha": "688a06ff0bcc291cb63225787b7083e0b96b3615" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ba9918cd22874245b55734f57470c719b577e591" + "sha": "f94318521f63085b9ccb43d42af89f153fb39f15" + } + }, + { + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "f94318521f63085b9ccb43d42af89f153fb39f15" } } ], @@ -22,7 +29,7 @@ "apiName": "aiplatform", "apiVersion": "v1beta1", "language": "python", - "generator": "gapic-generator-python" + "generator": "bazel" } } ] diff --git a/synth.py b/synth.py index 8685e21af7..107235edac 100644 --- a/synth.py +++ b/synth.py @@ -37,12 +37,26 @@ s.move( library, excludes=[ + ".pre-commit-config.yaml", "setup.py", "README.rst", "docs/index.rst", + "docs/definition_v1beta1/services.rst", + "docs/instance_v1beta1/services.rst", + "docs/params_v1beta1/services.rst", + "docs/prediction_v1beta1/services.rst", "scripts/fixup_aiplatform_v1beta1_keywords.py", + "scripts/fixup_definition_v1beta1_keywords.py", + "scripts/fixup_instance_v1beta1_keywords.py", + "scripts/fixup_params_v1beta1_keywords.py", + "scripts/fixup_prediction_v1beta1_keywords.py", "google/cloud/aiplatform/__init__.py", + "google/cloud/aiplatform/v1beta1/schema/**/services/", "tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py", + "tests/unit/gapic/definition_v1beta1/", + "tests/unit/gapic/instance_v1beta1/", + "tests/unit/gapic/params_v1beta1/", + "tests/unit/gapic/prediction_v1beta1/", ], ) From 151ed11f6da8e3e0bee5749d360d9a4b135ad988 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 4 Dec 2020 16:41:20 -0800 Subject: [PATCH 18/34] chore: add sample_configs (#115) --- .sample_configs/implementations.py | 27 ++ .../batch_delete_data_items_sample.py | 27 ++ 
.../batch_migrate_resources_sample.py | 27 ++ .../cancel_batch_prediction_job_sample.py | 21 ++ .../cancel_custom_job_sample.py | 21 ++ .../cancel_data_labeling_job_sample.py | 21 ++ ...cancel_hyperparameter_tuning_job_sample.py | 21 ++ .../cancel_training_pipeline_sample.py | 21 ++ .../create_annotation_sample.py | 32 ++ .../create_annotation_spec_sample.py | 28 ++ ...te_batch_prediction_job_bigquery_sample.py | 47 +++ ...diction_job_custom_image_explain_sample.py | 55 +++ ...ction_job_custom_tabular_explain_sample.py | 55 +++ .../create_batch_prediction_job_sample.py | 55 +++ ...h_prediction_job_tabular_explain_sample.py | 54 +++ ...ediction_job_text_classification_sample.py | 44 +++ ...ction_job_text_entity_extraction_sample.py | 44 +++ ...tion_job_text_sentiment_analysis_sample.py | 44 +++ ...ion_job_video_action_recognition_sample.py | 49 +++ ...diction_job_video_classification_sample.py | 52 +++ ...iction_job_video_object_tracking_sample.py | 48 +++ ...create_custom_job_python_package_sample.py | 41 +++ .../create_custom_job_sample.py | 43 +++ ...ata_labeling_job_active_learning_sample.py | 49 +++ ..._labeling_job_image_segmentation_sample.py | 47 +++ .../create_data_labeling_job_images_sample.py | 42 +++ .../create_data_labeling_job_sample.py | 42 +++ ...ata_labeling_job_specialist_pool_sample.py | 49 +++ .../create_data_labeling_job_text_sample.py | 27 ++ .../create_data_labeling_job_video_sample.py | 43 +++ .../create_dataset_image_sample.py | 29 ++ .../param_handlers/create_dataset_sample.py | 29 ++ .../create_dataset_tabular_bigquery_sample.py | 37 ++ .../create_dataset_tabular_gcs_sample.py | 37 ++ .../create_dataset_text_sample.py | 29 ++ .../create_dataset_video_sample.py | 29 ++ .../param_handlers/create_endpoint_sample.py | 28 ++ ...ameter_tuning_job_python_package_sample.py | 89 +++++ ...create_hyperparameter_tuning_job_sample.py | 64 ++++ .../create_specialist_pool_sample.py | 28 ++ ...ate_training_pipeline_custom_job_sample.py | 65 ++++ 
..._custom_training_managed_dataset_sample.py | 80 +++++ ...ng_pipeline_image_classification_sample.py | 43 +++ ..._pipeline_image_object_detection_sample.py | 42 +++ .../create_training_pipeline_sample.py | 44 +++ ..._pipeline_tabular_classification_sample.py | 72 ++++ ...ning_pipeline_tabular_regression_sample.py | 77 ++++ ...ing_pipeline_text_classification_sample.py | 36 ++ ..._pipeline_text_entity_extraction_sample.py | 37 ++ ...pipeline_text_sentiment_analysis_sample.py | 37 ++ ...ipeline_video_action_recognition_sample.py | 40 +++ ...ng_pipeline_video_classification_sample.py | 40 +++ ...g_pipeline_video_object_tracking_sample.py | 41 +++ .../delete_annotation_sample.py | 21 ++ .../delete_annotation_spec_sample.py | 21 ++ .../delete_batch_prediction_job_sample.py | 21 ++ .../delete_custom_job_sample.py | 21 ++ .../param_handlers/delete_data_item_sample.py | 21 ++ .../delete_data_labeling_job_sample.py | 21 ++ .../param_handlers/delete_dataset_sample.py | 21 ++ .../param_handlers/delete_endpoint_sample.py | 21 ++ ...delete_hyperparameter_tuning_job_sample.py | 21 ++ .../param_handlers/delete_model_sample.py | 21 ++ .../delete_specialist_pool_sample.py | 21 ++ .../delete_training_pipeline_sample.py | 21 ++ ...eploy_model_custom_trained_model_sample.py | 48 +++ .../param_handlers/deploy_model_sample.py | 43 +++ .../explain_custom_image_sample.py | 38 ++ .../explain_custom_tabular_sample.py | 38 ++ .../param_handlers/explain_sample.py | 39 ++ .../param_handlers/explain_tabular_sample.py | 38 ++ .../param_handlers/export_data_sample.py | 27 ++ .../export_evaluated_data_items_sample.py | 20 ++ .../param_handlers/export_model_sample.py | 30 ++ ...ort_model_tabular_classification_sample.py | 32 ++ ...t_model_video_action_recognition_sample.py | 33 ++ .../param_handlers/get_annotation_sample.py | 21 ++ .../get_annotation_spec_sample.py | 21 ++ .../get_batch_prediction_job_sample.py | 21 ++ .../param_handlers/get_custom_job_sample.py | 21 ++ 
.../param_handlers/get_data_item_sample.py | 21 ++ .../get_data_labeling_job_sample.py | 21 ++ .../param_handlers/get_dataset_sample.py | 21 ++ .../param_handlers/get_endpoint_sample.py | 21 ++ .../get_hyperparameter_tuning_job_sample.py | 21 ++ ...del_evaluation_entity_extraction_sample.py | 21 ++ ..._evaluation_image_classification_sample.py | 21 ++ ...valuation_image_object_detection_sample.py | 21 ++ .../get_model_evaluation_sample.py | 21 ++ ...el_evaluation_sentiment_analysis_sample.py | 21 ++ .../get_model_evaluation_slice_sample.py | 21 ++ ...valuation_tabular_classification_sample.py | 20 ++ ...el_evaluation_tabular_regression_sample.py | 20 ++ ...l_evaluation_text_classification_sample.py | 21 ++ ...valuation_text_entity_extraction_sample.py | 21 ++ ...aluation_text_sentiment_analysis_sample.py | 21 ++ ...luation_video_action_recognition_sample.py | 20 ++ ..._evaluation_video_classification_sample.py | 21 ++ ...evaluation_video_object_tracking_sample.py | 21 ++ .../param_handlers/get_model_sample.py | 21 ++ .../get_specialist_pool_sample.py | 21 ++ .../get_training_pipeline_sample.py | 21 ++ ...mage_classification_single_label_sample.py | 32 ++ ...port_data_image_object_detection_sample.py | 32 ++ .../param_handlers/import_data_sample.py | 33 ++ ...text_classification_single_label_sample.py | 32 ++ ...port_data_text_entity_extraction_sample.py | 32 ++ ...ort_data_text_sentiment_analysis_sample.py | 32 ++ ...rt_data_video_action_recognition_sample.py | 33 ++ ...import_data_video_classification_sample.py | 32 ++ ...mport_data_video_object_tracking_sample.py | 32 ++ .../list_annotation_specs_sample.py | 21 ++ .../param_handlers/list_annotations_sample.py | 21 ++ .../list_batch_prediction_jobs_sample.py | 21 ++ .../param_handlers/list_custom_jobs_sample.py | 21 ++ .../param_handlers/list_data_items_sample.py | 21 ++ .../list_data_labeling_jobs_sample.py | 21 ++ .../param_handlers/list_datasets_sample.py | 21 ++ .../param_handlers/list_endpoints_sample.py | 21 
++ .../list_hyperparameter_tuning_jobs_sample.py | 21 ++ .../list_model_evaluation_slices_sample.py | 21 ++ .../list_model_evaluations_sample.py | 21 ++ .../param_handlers/list_models_sample.py | 21 ++ .../list_specialist_pools_sample.py | 21 ++ .../list_training_pipelines_sample.py | 21 ++ .../predict_custom_trained_model_sample.py | 32 ++ .../predict_image_classification_sample.py | 41 +++ .../predict_image_file_sample.py | 38 ++ .../predict_image_object_detection_sample.py | 41 +++ .../param_handlers/predict_sample.py | 31 ++ .../predict_tabular_classification_sample.py | 33 ++ .../predict_tabular_regression_sample.py | 33 ++ ...text_classification_single_label_sample.py | 32 ++ .../predict_text_entity_extraction_sample.py | 33 ++ .../predict_text_sentiment_analysis_sample.py | 31 ++ .../search_migratable_resources_sample.py | 21 ++ .../param_handlers/undeploy_model_sample.py | 32 ++ .../update_annotation_spec_sample.py | 32 ++ .../param_handlers/update_dataset_sample.py | 32 ++ .../param_handlers/update_endpoint_sample.py | 32 ++ .../param_handlers/update_model_sample.py | 34 ++ .../update_specialist_pool_sample.py | 32 ++ .../upload_model_custom_container_sample.py | 39 ++ ..._explain_image_managed_container_sample.py | 69 ++++ ...xplain_tabular_managed_container_sample.py | 73 ++++ .../upload_model_managed_container_sample.py | 39 ++ .../param_handlers/upload_model_sample.py | 43 +++ .sample_configs/process_configs.yaml | 333 ++++++++++++++++++ .sample_configs/resource_info.yaml | 33 ++ .sample_configs/variants.yaml | 215 +++++++++++ 150 files changed, 5398 insertions(+) create mode 100644 .sample_configs/implementations.py create mode 100644 .sample_configs/param_handlers/batch_delete_data_items_sample.py create mode 100644 .sample_configs/param_handlers/batch_migrate_resources_sample.py create mode 100644 .sample_configs/param_handlers/cancel_batch_prediction_job_sample.py create mode 100644 .sample_configs/param_handlers/cancel_custom_job_sample.py create 
mode 100644 .sample_configs/param_handlers/cancel_data_labeling_job_sample.py create mode 100644 .sample_configs/param_handlers/cancel_hyperparameter_tuning_job_sample.py create mode 100644 .sample_configs/param_handlers/cancel_training_pipeline_sample.py create mode 100644 .sample_configs/param_handlers/create_annotation_sample.py create mode 100644 .sample_configs/param_handlers/create_annotation_spec_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_bigquery_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_custom_image_explain_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_custom_tabular_explain_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_tabular_explain_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_video_action_recognition_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_video_classification_sample.py create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_video_object_tracking_sample.py create mode 100644 .sample_configs/param_handlers/create_custom_job_python_package_sample.py create mode 100644 .sample_configs/param_handlers/create_custom_job_sample.py create mode 100644 .sample_configs/param_handlers/create_data_labeling_job_active_learning_sample.py create mode 100644 .sample_configs/param_handlers/create_data_labeling_job_image_segmentation_sample.py create mode 100644 
.sample_configs/param_handlers/create_data_labeling_job_images_sample.py create mode 100644 .sample_configs/param_handlers/create_data_labeling_job_sample.py create mode 100644 .sample_configs/param_handlers/create_data_labeling_job_specialist_pool_sample.py create mode 100644 .sample_configs/param_handlers/create_data_labeling_job_text_sample.py create mode 100644 .sample_configs/param_handlers/create_data_labeling_job_video_sample.py create mode 100644 .sample_configs/param_handlers/create_dataset_image_sample.py create mode 100644 .sample_configs/param_handlers/create_dataset_sample.py create mode 100644 .sample_configs/param_handlers/create_dataset_tabular_bigquery_sample.py create mode 100644 .sample_configs/param_handlers/create_dataset_tabular_gcs_sample.py create mode 100644 .sample_configs/param_handlers/create_dataset_text_sample.py create mode 100644 .sample_configs/param_handlers/create_dataset_video_sample.py create mode 100644 .sample_configs/param_handlers/create_endpoint_sample.py create mode 100644 .sample_configs/param_handlers/create_hyperparameter_tuning_job_python_package_sample.py create mode 100644 .sample_configs/param_handlers/create_hyperparameter_tuning_job_sample.py create mode 100644 .sample_configs/param_handlers/create_specialist_pool_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_custom_job_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_custom_training_managed_dataset_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_image_classification_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_image_object_detection_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_tabular_classification_sample.py create mode 100644 
.sample_configs/param_handlers/create_training_pipeline_tabular_regression_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_text_classification_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_text_entity_extraction_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_video_action_recognition_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_video_classification_sample.py create mode 100644 .sample_configs/param_handlers/create_training_pipeline_video_object_tracking_sample.py create mode 100644 .sample_configs/param_handlers/delete_annotation_sample.py create mode 100644 .sample_configs/param_handlers/delete_annotation_spec_sample.py create mode 100644 .sample_configs/param_handlers/delete_batch_prediction_job_sample.py create mode 100644 .sample_configs/param_handlers/delete_custom_job_sample.py create mode 100644 .sample_configs/param_handlers/delete_data_item_sample.py create mode 100644 .sample_configs/param_handlers/delete_data_labeling_job_sample.py create mode 100644 .sample_configs/param_handlers/delete_dataset_sample.py create mode 100644 .sample_configs/param_handlers/delete_endpoint_sample.py create mode 100644 .sample_configs/param_handlers/delete_hyperparameter_tuning_job_sample.py create mode 100644 .sample_configs/param_handlers/delete_model_sample.py create mode 100644 .sample_configs/param_handlers/delete_specialist_pool_sample.py create mode 100644 .sample_configs/param_handlers/delete_training_pipeline_sample.py create mode 100644 .sample_configs/param_handlers/deploy_model_custom_trained_model_sample.py create mode 100644 .sample_configs/param_handlers/deploy_model_sample.py create mode 100644 .sample_configs/param_handlers/explain_custom_image_sample.py create mode 100644 
.sample_configs/param_handlers/explain_custom_tabular_sample.py create mode 100644 .sample_configs/param_handlers/explain_sample.py create mode 100644 .sample_configs/param_handlers/explain_tabular_sample.py create mode 100644 .sample_configs/param_handlers/export_data_sample.py create mode 100644 .sample_configs/param_handlers/export_evaluated_data_items_sample.py create mode 100644 .sample_configs/param_handlers/export_model_sample.py create mode 100644 .sample_configs/param_handlers/export_model_tabular_classification_sample.py create mode 100644 .sample_configs/param_handlers/export_model_video_action_recognition_sample.py create mode 100644 .sample_configs/param_handlers/get_annotation_sample.py create mode 100644 .sample_configs/param_handlers/get_annotation_spec_sample.py create mode 100644 .sample_configs/param_handlers/get_batch_prediction_job_sample.py create mode 100644 .sample_configs/param_handlers/get_custom_job_sample.py create mode 100644 .sample_configs/param_handlers/get_data_item_sample.py create mode 100644 .sample_configs/param_handlers/get_data_labeling_job_sample.py create mode 100644 .sample_configs/param_handlers/get_dataset_sample.py create mode 100644 .sample_configs/param_handlers/get_endpoint_sample.py create mode 100644 .sample_configs/param_handlers/get_hyperparameter_tuning_job_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_entity_extraction_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_image_classification_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_image_object_detection_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_sentiment_analysis_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_slice_sample.py create mode 100644 
.sample_configs/param_handlers/get_model_evaluation_tabular_classification_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_tabular_regression_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_text_classification_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_text_entity_extraction_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_text_sentiment_analysis_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_video_action_recognition_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_video_classification_sample.py create mode 100644 .sample_configs/param_handlers/get_model_evaluation_video_object_tracking_sample.py create mode 100644 .sample_configs/param_handlers/get_model_sample.py create mode 100644 .sample_configs/param_handlers/get_specialist_pool_sample.py create mode 100644 .sample_configs/param_handlers/get_training_pipeline_sample.py create mode 100644 .sample_configs/param_handlers/import_data_image_classification_single_label_sample.py create mode 100644 .sample_configs/param_handlers/import_data_image_object_detection_sample.py create mode 100644 .sample_configs/param_handlers/import_data_sample.py create mode 100644 .sample_configs/param_handlers/import_data_text_classification_single_label_sample.py create mode 100644 .sample_configs/param_handlers/import_data_text_entity_extraction_sample.py create mode 100644 .sample_configs/param_handlers/import_data_text_sentiment_analysis_sample.py create mode 100644 .sample_configs/param_handlers/import_data_video_action_recognition_sample.py create mode 100644 .sample_configs/param_handlers/import_data_video_classification_sample.py create mode 100644 .sample_configs/param_handlers/import_data_video_object_tracking_sample.py create mode 100644 .sample_configs/param_handlers/list_annotation_specs_sample.py create mode 100644 
.sample_configs/param_handlers/list_annotations_sample.py create mode 100644 .sample_configs/param_handlers/list_batch_prediction_jobs_sample.py create mode 100644 .sample_configs/param_handlers/list_custom_jobs_sample.py create mode 100644 .sample_configs/param_handlers/list_data_items_sample.py create mode 100644 .sample_configs/param_handlers/list_data_labeling_jobs_sample.py create mode 100644 .sample_configs/param_handlers/list_datasets_sample.py create mode 100644 .sample_configs/param_handlers/list_endpoints_sample.py create mode 100644 .sample_configs/param_handlers/list_hyperparameter_tuning_jobs_sample.py create mode 100644 .sample_configs/param_handlers/list_model_evaluation_slices_sample.py create mode 100644 .sample_configs/param_handlers/list_model_evaluations_sample.py create mode 100644 .sample_configs/param_handlers/list_models_sample.py create mode 100644 .sample_configs/param_handlers/list_specialist_pools_sample.py create mode 100644 .sample_configs/param_handlers/list_training_pipelines_sample.py create mode 100644 .sample_configs/param_handlers/predict_custom_trained_model_sample.py create mode 100644 .sample_configs/param_handlers/predict_image_classification_sample.py create mode 100644 .sample_configs/param_handlers/predict_image_file_sample.py create mode 100644 .sample_configs/param_handlers/predict_image_object_detection_sample.py create mode 100644 .sample_configs/param_handlers/predict_sample.py create mode 100644 .sample_configs/param_handlers/predict_tabular_classification_sample.py create mode 100644 .sample_configs/param_handlers/predict_tabular_regression_sample.py create mode 100644 .sample_configs/param_handlers/predict_text_classification_single_label_sample.py create mode 100644 .sample_configs/param_handlers/predict_text_entity_extraction_sample.py create mode 100644 .sample_configs/param_handlers/predict_text_sentiment_analysis_sample.py create mode 100644 .sample_configs/param_handlers/search_migratable_resources_sample.py 
create mode 100644 .sample_configs/param_handlers/undeploy_model_sample.py create mode 100644 .sample_configs/param_handlers/update_annotation_spec_sample.py create mode 100644 .sample_configs/param_handlers/update_dataset_sample.py create mode 100644 .sample_configs/param_handlers/update_endpoint_sample.py create mode 100644 .sample_configs/param_handlers/update_model_sample.py create mode 100644 .sample_configs/param_handlers/update_specialist_pool_sample.py create mode 100644 .sample_configs/param_handlers/upload_model_custom_container_sample.py create mode 100644 .sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py create mode 100644 .sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py create mode 100644 .sample_configs/param_handlers/upload_model_managed_container_sample.py create mode 100644 .sample_configs/param_handlers/upload_model_sample.py create mode 100644 .sample_configs/process_configs.yaml create mode 100644 .sample_configs/resource_info.yaml create mode 100644 .sample_configs/variants.yaml diff --git a/.sample_configs/implementations.py b/.sample_configs/implementations.py new file mode 100644 index 0000000000..f95d79adf2 --- /dev/null +++ b/.sample_configs/implementations.py @@ -0,0 +1,27 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def read_file(filename): + with open(filename, 'rb') as f: + file_content = f.read() + return file_content + + +def b64_encode(content): + return base64.b64encode(content).decode("utf-8") + + +def to_protobuf_value(d): + return json_format.ParseDict(d, Value()) diff --git a/.sample_configs/param_handlers/batch_delete_data_items_sample.py b/.sample_configs/param_handlers/batch_delete_data_items_sample.py new file mode 100644 index 0000000000..5755a0b895 --- /dev/null +++ b/.sample_configs/param_handlers/batch_delete_data_items_sample.py @@ -0,0 +1,27 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in batch_delete_data_items_sample + parent = parent + + return parent + +def make_names(data_item_name_1: str, data_item_name_2: str) -> typing.Sequence[str]: + # The list of full name of data items in the same dataset to be deleted. + names = [data_item_name_1, data_item_name_2] + + return names + diff --git a/.sample_configs/param_handlers/batch_migrate_resources_sample.py b/.sample_configs/param_handlers/batch_migrate_resources_sample.py new file mode 100644 index 0000000000..e798a393f1 --- /dev/null +++ b/.sample_configs/param_handlers/batch_migrate_resources_sample.py @@ -0,0 +1,27 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in batch_migrate_resources_sample + parent = parent + + return parent + +def make_migrate_resource_requests(migrate_resource_requests: typing.Sequence[google.cloud.aiplatform_v1beta1.types.migration_service.MigrateResourceRequest]) -> typing.Sequence[google.cloud.aiplatform_v1beta1.types.migration_service.MigrateResourceRequest]: + # Sample function parameter migrate_resource_requests in batch_migrate_resources_sample + migrate_resource_requests = migrate_resource_requests + + return migrate_resource_requests + diff --git a/.sample_configs/param_handlers/cancel_batch_prediction_job_sample.py b/.sample_configs/param_handlers/cancel_batch_prediction_job_sample.py new file mode 100644 index 0000000000..06e9adafda --- /dev/null +++ b/.sample_configs/param_handlers/cancel_batch_prediction_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in cancel_batch_prediction_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/cancel_custom_job_sample.py b/.sample_configs/param_handlers/cancel_custom_job_sample.py new file mode 100644 index 0000000000..918638d510 --- /dev/null +++ b/.sample_configs/param_handlers/cancel_custom_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in cancel_custom_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/cancel_data_labeling_job_sample.py b/.sample_configs/param_handlers/cancel_data_labeling_job_sample.py new file mode 100644 index 0000000000..b11a0a22ff --- /dev/null +++ b/.sample_configs/param_handlers/cancel_data_labeling_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in cancel_data_labeling_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/cancel_hyperparameter_tuning_job_sample.py b/.sample_configs/param_handlers/cancel_hyperparameter_tuning_job_sample.py new file mode 100644 index 0000000000..0a5e5a5b08 --- /dev/null +++ b/.sample_configs/param_handlers/cancel_hyperparameter_tuning_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in cancel_hyperparameter_tuning_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/cancel_training_pipeline_sample.py b/.sample_configs/param_handlers/cancel_training_pipeline_sample.py new file mode 100644 index 0000000000..bcdda26c53 --- /dev/null +++ b/.sample_configs/param_handlers/cancel_training_pipeline_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in cancel_training_pipeline_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/create_annotation_sample.py b/.sample_configs/param_handlers/create_annotation_sample.py new file mode 100644 index 0000000000..31031b7794 --- /dev/null +++ b/.sample_configs/param_handlers/create_annotation_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_annotation_sample + parent = parent + + return parent + +def make_annotation(payload_schema_uri: str) -> google.cloud.aiplatform_v1alpha1.types.annotation.Annotation: + payload_dict = {} + payload = to_protobuf_value(payload_dict) + + annotation = { + 'payload_schema_uri': '', + 'payload': payload + } + + return annotation + diff --git a/.sample_configs/param_handlers/create_annotation_spec_sample.py b/.sample_configs/param_handlers/create_annotation_spec_sample.py new file mode 100644 index 0000000000..142b415fdf --- /dev/null +++ b/.sample_configs/param_handlers/create_annotation_spec_sample.py @@ -0,0 +1,28 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_annotation_spec_sample + parent = parent + + return parent + +def make_annotation_spec(display_name: str) -> google.cloud.aiplatform_v1alpha1.types.annotation_spec.AnnotationSpec: + annotation_spec = { + 'display_name': display_name + } + + return annotation_spec + diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_bigquery_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_bigquery_sample.py new file mode 100644 index 0000000000..d11245c50c --- /dev/null +++ b/.sample_configs/param_handlers/create_batch_prediction_job_bigquery_sample.py @@ -0,0 +1,47 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_batch_prediction_job_bigquery_sample + parent = parent + + return parent + +def make_batch_prediction_job(display_name: str, model_name: str, instances_format: str, bigquery_source_input_uri: str, predictions_format: str, bigquery_destination_output_uri: str) -> google.cloud.aiplatform_v1alpha1.types.batch_prediction_job.BatchPredictionJob: + model_parameters_dict = {} + model_parameters = to_protobuf_value(model_parameters_dict) + + batch_prediction_job = { + 'display_name': display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + 'model': model_name, + 'model_parameters': model_parameters, + 'input_config': { + 'instances_format': instances_format, + 'bigquery_source': { + 'input_uri': bigquery_source_input_uri + } + }, + 'output_config': { + 'predictions_format': predictions_format, + 'bigquery_destination': { + 'output_uri': bigquery_destination_output_uri + } + }, + # optional + 'generate_explanation': True + } + + return batch_prediction_job diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_custom_image_explain_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_custom_image_explain_sample.py new file mode 100644 index 0000000000..e13d413d05 --- /dev/null +++ b/.sample_configs/param_handlers/create_batch_prediction_job_custom_image_explain_sample.py @@ -0,0 +1,55 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + + +def make_batch_prediction_job(display_name: str, model_name: str, instances_format: str, gcs_source_uri: str, predictions_format: str, gcs_destination_output_uri_prefix: str) -> google.cloud.aiplatform_v1alpha1.types.batch_prediction_job.BatchPredictionJob: + model_parameters_dict = {} + model_parameters = to_protobuf_value(model_parameters_dict) + + batch_prediction_job = { + 'display_name': display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + 'model': model_name, + 'model_parameters': model_parameters, + 'input_config': { + 'instances_format': instances_format, + 'gcs_source': { + 'uris': [gcs_source_uri] + }, + }, + 'output_config': { + 'predictions_format': predictions_format, + 'gcs_destination': { + 'output_uri_prefix': gcs_destination_output_uri_prefix + }, + }, + 'dedicated_resources': { + 'machine_spec': { + 'machine_type': 'n1-standard-2', + 'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + 'accelerator_count': 1 + }, + 'starting_replica_count': 1, + 'max_replica_count': 1 + }, + 'generate_explanation': True + } + + return batch_prediction_job diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_custom_tabular_explain_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_custom_tabular_explain_sample.py new file mode 100644 index 0000000000..423a8a11d2 --- /dev/null +++ b/.sample_configs/param_handlers/create_batch_prediction_job_custom_tabular_explain_sample.py @@ -0,0 +1,55 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_batch_prediction_job(display_name: str, model_name: str, instances_format: str, gcs_source_uri: str, predictions_format: str, gcs_destination_output_uri_prefix: str) -> google.cloud.aiplatform_v1alpha1.types.batch_prediction_job.BatchPredictionJob: + model_parameters_dict = {} + model_parameters = to_protobuf_value(model_parameters_dict) + + batch_prediction_job = { + 'display_name': display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + 'model': model_name, + 'model_parameters': model_parameters, + 'input_config': { + 'instances_format': instances_format, + 'gcs_source': { + 'uris': [gcs_source_uri] + }, + }, + 'output_config': { + 'predictions_format': predictions_format, + 'gcs_destination': { + 'output_uri_prefix': gcs_destination_output_uri_prefix + }, + }, + 'dedicated_resources': { + 'machine_spec': { + 'machine_type': 'n1-standard-2', + 'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + 'accelerator_count': 1 + }, + 'starting_replica_count': 1, + 'max_replica_count': 1 + }, + 'generate_explanation': True + } + + return batch_prediction_job + diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_sample.py new file mode 100644 index 0000000000..27701c62e7 --- /dev/null +++ b/.sample_configs/param_handlers/create_batch_prediction_job_sample.py @@ -0,0 +1,55 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_batch_prediction_job_sample unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model_name: str,
    instances_format: str,
    gcs_source_uri: str,
    predictions_format: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1alpha1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict with dedicated GPU resources.

    NOTE(review): `to_protobuf_value` and `aiplatform` are presumably injected by the
    sample generator — confirm before running standalone. Annotations are quoted
    (PEP 484 forward references) so the module imports without the aiplatform package.
    """
    # No extra model parameters for this sample; an empty Value is still required.
    model_parameters_dict = {}
    model_parameters = to_protobuf_value(model_parameters_dict)

    batch_prediction_job = {
        'display_name': display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        'model': model_name,
        'model_parameters': model_parameters,
        'input_config': {
            'instances_format': instances_format,
            'gcs_source': {
                'uris': [gcs_source_uri]
            },
        },
        'output_config': {
            'predictions_format': predictions_format,
            'gcs_destination': {
                'output_uri_prefix': gcs_destination_output_uri_prefix
            },
        },
        'dedicated_resources': {
            'machine_spec': {
                'machine_type': 'n1-standard-2',
                'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
                'accelerator_count': 1
            },
            'starting_replica_count': 1,
            'max_replica_count': 1
        }
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name (projects/{project}/locations/{location}) unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model_name: str,
    instances_format: str,
    gcs_source_uri: str,
    predictions_format: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1alpha1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict for a tabular model with explanations.

    NOTE(review): `to_protobuf_value` and `aiplatform` are presumably injected by the
    sample generator — confirm before running standalone. Annotations are quoted
    (PEP 484 forward references) so the module imports without the aiplatform package.
    """
    # No extra model parameters for this sample; an empty Value is still required.
    model_parameters_dict = {}
    model_parameters = to_protobuf_value(model_parameters_dict)

    batch_prediction_job = {
        'display_name': display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        'model': model_name,
        'model_parameters': model_parameters,
        'input_config': {
            'instances_format': instances_format,
            'gcs_source': {
                'uris': [gcs_source_uri]
            },
        },
        'output_config': {
            'predictions_format': predictions_format,
            'gcs_destination': {
                'output_uri_prefix': gcs_destination_output_uri_prefix
            },
        },
        'dedicated_resources': {
            'machine_spec': {
                'machine_type': 'n1-standard-2',
                'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
                'accelerator_count': 1
            },
            'starting_replica_count': 1,
            'max_replica_count': 1
        },
        # Request feature attributions alongside predictions.
        'generate_explanation': True
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name (projects/{project}/locations/{location}) unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict for text classification (JSONL in/out).

    NOTE(review): `Value` is presumably `google.protobuf.struct_pb2.Value`, injected by
    the sample generator — confirm before standalone use. Annotations are quoted
    (PEP 484 forward references) so the module imports without the aiplatform package.
    """
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        # Empty parameters: the model's defaults are used.
        "model_parameters": Value(),
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name (projects/{project}/locations/{location}) unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict for text entity extraction (JSONL in/out).

    NOTE(review): `Value` is presumably `google.protobuf.struct_pb2.Value`, injected by
    the sample generator — confirm before standalone use. Annotations are quoted
    (PEP 484 forward references) so the module imports without the aiplatform package.
    """
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        # Empty parameters: the model's defaults are used.
        "model_parameters": Value(),
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name (projects/{project}/locations/{location}) unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict for text sentiment analysis (JSONL in/out).

    NOTE(review): `Value` is presumably `google.protobuf.struct_pb2.Value`, injected by
    the sample generator — confirm before standalone use. Annotations are quoted
    (PEP 484 forward references) so the module imports without the aiplatform package.
    """
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        # Empty parameters: the model's defaults are used.
        "model_parameters": Value(),
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_batch_prediction_job_video_action_recognition_sample unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict for video action recognition (JSONL in/out).

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    # Drop predictions below 50% confidence.
    model_parameters_dict = {
        "confidenceThreshold": 0.5,
    }
    model_parameters = to_protobuf_value(model_parameters_dict)

    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model,
        "model_parameters": model_parameters,
        "input_config": {
            "instances_format": "jsonl",
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": "jsonl",
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name (projects/{project}/locations/{location}) unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model_name: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1alpha1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict for video classification (JSONL in/out).

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    # Classification knobs: 50% confidence floor, cap of 10000 predictions,
    # and classification at segment, shot, and one-second-interval granularity.
    model_parameters_dict = {
        'confidenceThreshold': 0.5,
        'maxPredictions': 10000,
        'segmentClassification': True,
        'shotClassification': True,
        'oneSecIntervalClassification': True
    }
    model_parameters = to_protobuf_value(model_parameters_dict)

    batch_prediction_job = {
        'display_name': display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        'model': model_name,
        'model_parameters': model_parameters,
        'input_config': {
            'instances_format': 'jsonl',
            'gcs_source': {
                'uris': [gcs_source_uri]
            },
        },
        'output_config': {
            'predictions_format': 'jsonl',
            'gcs_destination': {
                'output_uri_prefix': gcs_destination_output_uri_prefix
            },
        }
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_batch_prediction_job_video_object_tracking_sample unchanged."""
    return parent


def make_batch_prediction_job(
    display_name: str,
    model_name: str,
    gcs_source_uri: str,
    gcs_destination_output_uri_prefix: str,
) -> "google.cloud.aiplatform_v1alpha1.types.batch_prediction_job.BatchPredictionJob":
    """Build a BatchPredictionJob request dict for video object tracking (JSONL in/out).

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    # Threshold 0.0 keeps every tracked object regardless of confidence.
    model_parameters_dict = {
        'confidenceThreshold': 0.0
    }
    model_parameters = to_protobuf_value(model_parameters_dict)

    batch_prediction_job = {
        'display_name': display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        'model': model_name,
        'model_parameters': model_parameters,
        'input_config': {
            'instances_format': "jsonl",
            'gcs_source': {
                'uris': [gcs_source_uri]
            },
        },
        'output_config': {
            'predictions_format': "jsonl",
            'gcs_destination': {
                'output_uri_prefix': gcs_destination_output_uri_prefix
            },
        }
    }

    return batch_prediction_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_custom_job_python_package_sample unchanged."""
    return parent


def make_custom_job(
    display_name: str,
    package_executor_image_uri: str,
    gcs_python_package_uri: str,
) -> "google.cloud.aiplatform_v1alpha1.types.custom_job.CustomJob":
    """Build a CustomJob request dict running a Python package on one GPU worker.

    NOTE(review): `aiplatform` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward
    references) so the module imports without the aiplatform package.
    """
    custom_job = {
        'display_name': display_name,
        'job_spec': {
            'worker_pool_specs': [{
                'machine_spec': {
                    'machine_type': 'n1-standard-2',
                    'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
                    'accelerator_count': 1
                },
                'replica_count': 1,
                'python_package_spec': {
                    'executor_image_uri': package_executor_image_uri,
                    'package_uris': [gcs_python_package_uri]
                }
            }]
        }
    }

    return custom_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_custom_job_sample unchanged."""
    return parent


def make_custom_job(
    display_name: str,
    container_image_uri: str,
) -> "google.cloud.aiplatform_v1alpha1.types.custom_job.CustomJob":
    """Build a CustomJob request dict running a custom container on one GPU worker.

    NOTE(review): `aiplatform` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward
    references) so the module imports without the aiplatform package.
    """
    custom_job = {
        'display_name': display_name,
        'job_spec': {
            'worker_pool_specs': [{
                'machine_spec': {
                    'machine_type': 'n1-standard-4',
                    'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
                    'accelerator_count': 1
                },
                'replica_count': 1,
                'container_spec': {
                    'image_uri': container_image_uri,
                    # Use the image's default entrypoint.
                    'command': [],
                    'args': []
                }
            }]
        }
    }

    return custom_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_data_labeling_job_active_learning_sample unchanged."""
    return parent


def make_data_labeling_job(
    display_name: str,
    dataset: str,
    instruction_uri: str,
    inputs_schema_uri: str,
    annotation_spec: str,
) -> "google.cloud.aiplatform_v1beta1.types.data_labeling_job.DataLabelingJob":
    """Build a DataLabelingJob request dict with active learning enabled.

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    inputs_dict = {"annotation_specs": [annotation_spec]}
    inputs = to_protobuf_value(inputs_dict)

    # Label at most one data item through active learning in this sample.
    active_learning_config = {"max_data_item_count": 1}

    data_labeling_job = {
        "display_name": display_name,
        # Full resource name: projects/{project}/locations/{location}/datasets/{dataset_id}
        "datasets": [dataset],
        "labeler_count": 1,
        "instruction_uri": instruction_uri,
        "inputs_schema_uri": inputs_schema_uri,
        "inputs": inputs,
        "annotation_labels": {
            "aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_active_learning"
        },
        "active_learning_config": active_learning_config,
    }

    return data_labeling_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_data_labeling_job_image_segmentation_sample unchanged."""
    return parent


def make_data_labeling_job(
    display_name: str,
    dataset: str,
    instruction_uri: str,
    inputs_schema_uri: str,
    annotation_spec: dict,
    annotation_set_name: str,
) -> "google.cloud.aiplatform_v1beta1.types.data_labeling_job.DataLabelingJob":
    """Build a DataLabelingJob request dict for image segmentation.

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    # Segmentation uses color-mapped annotation specs rather than plain names.
    inputs_dict = {"annotationSpecColors": [annotation_spec]}
    inputs = to_protobuf_value(inputs_dict)

    data_labeling_job = {
        "display_name": display_name,
        # Full resource name: projects/{project}/locations/{location}/datasets/{dataset_id}
        "datasets": [dataset],
        "labeler_count": 1,
        "instruction_uri": instruction_uri,
        "inputs_schema_uri": inputs_schema_uri,
        "inputs": inputs,
        "annotation_labels": {
            "aiplatform.googleapis.com/annotation_set_name": annotation_set_name
        },
    }

    return data_labeling_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_data_labeling_job_images_sample unchanged."""
    return parent


def make_data_labeling_job(
    display_name: str,
    dataset: str,
    instruction_uri: str,
    annotation_spec: str,
) -> "google.cloud.aiplatform_v1alpha1.types.data_labeling_job.DataLabelingJob":
    """Build a DataLabelingJob request dict for image classification labeling.

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    inputs_dict = {
        "annotation_specs": [annotation_spec]
    }
    inputs = to_protobuf_value(inputs_dict)

    data_labeling_job = {
        'display_name': display_name,
        # Full resource name: projects/{project_id}/locations/{location}/datasets/{dataset_id}
        'datasets': [dataset],
        # labeler_count must be 1, 3, or 5
        'labeler_count': 1,
        'instruction_uri': instruction_uri,
        'inputs_schema_uri': 'gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml',
        'inputs': inputs,
        'annotation_labels': {
            'aiplatform.googleapis.com/annotation_set_name': 'my_test_saved_query'
        }
    }

    return data_labeling_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_data_labeling_job_sample unchanged."""
    return parent


def make_data_labeling_job(
    display_name: str,
    dataset_name: str,
    instruction_uri: str,
    inputs_schema_uri: str,
    annotation_spec: str,
) -> "google.cloud.aiplatform_v1alpha1.types.data_labeling_job.DataLabelingJob":
    """Build a generic DataLabelingJob request dict.

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    inputs_dict = {
        "annotation_specs": [annotation_spec]
    }
    inputs = to_protobuf_value(inputs_dict)

    data_labeling_job = {
        'display_name': display_name,
        # Full resource name: projects/{project_id}/locations/{location}/datasets/{dataset_id}
        'datasets': [dataset_name],
        # labeler_count must be 1, 3, or 5
        'labeler_count': 1,
        'instruction_uri': instruction_uri,
        'inputs_schema_uri': inputs_schema_uri,
        'inputs': inputs,
        'annotation_labels': {
            'aiplatform.googleapis.com/annotation_set_name': 'my_test_saved_query'
        }
    }

    return data_labeling_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_data_labeling_job_specialist_pool_sample unchanged."""
    return parent


def make_data_labeling_job(
    display_name: str,
    dataset: str,
    specialist_pool: str,
    instruction_uri: str,
    inputs_schema_uri: str,
    annotation_spec: str,
) -> "google.cloud.aiplatform_v1beta1.types.data_labeling_job.DataLabelingJob":
    """Build a DataLabelingJob request dict routed to a specialist pool.

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    inputs_dict = {"annotation_specs": [annotation_spec]}
    inputs = to_protobuf_value(inputs_dict)

    data_labeling_job = {
        "display_name": display_name,
        # Full resource name: projects/{project}/locations/{location}/datasets/{dataset_id}
        "datasets": [dataset],
        "labeler_count": 1,
        "instruction_uri": instruction_uri,
        "inputs_schema_uri": inputs_schema_uri,
        "inputs": inputs,
        "annotation_labels": {
            "aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_specialist_pool"
        },
        # Full resource name: projects/{project}/locations/{location}/specialistPools/{specialist_pool_id}
        "specialist_pools": [specialist_pool],
    }

    return data_labeling_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_data_labeling_job_text_sample unchanged."""
    return parent


def make_data_labeling_job(
    data_labeling_job: "google.cloud.aiplatform_v1alpha1.types.data_labeling_job.DataLabelingJob",
) -> "google.cloud.aiplatform_v1alpha1.types.data_labeling_job.DataLabelingJob":
    """Return the caller-supplied DataLabelingJob unchanged (identity param handler).

    Annotations are quoted (PEP 484 forward references) so the module imports
    without `google.cloud.aiplatform_v1alpha1` in scope.
    """
    return data_labeling_job
def make_parent(parent: str) -> str:
    """Return the `parent` resource name used by create_data_labeling_job_video_sample unchanged."""
    return parent


def make_data_labeling_job(
    display_name: str,
    dataset: str,
    instruction_uri: str,
    annotation_spec: str,
) -> "google.cloud.aiplatform_v1alpha1.types.data_labeling_job.DataLabelingJob":
    """Build a DataLabelingJob request dict for video classification labeling.

    NOTE(review): `to_protobuf_value` is presumably injected by the sample generator —
    confirm before standalone use. Annotations are quoted (PEP 484 forward references)
    so the module imports without the aiplatform package.
    """
    inputs_dict = {
        "annotation_specs": [annotation_spec]
    }
    inputs = to_protobuf_value(inputs_dict)

    data_labeling_job = {
        'display_name': display_name,
        # Full resource name: projects/{project_id}/locations/{location}/datasets/{dataset_id}
        'datasets': [dataset],
        # labeler_count must be 1, 3, or 5
        'labeler_count': 1,
        'instruction_uri': instruction_uri,
        'inputs_schema_uri': 'gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/video_classification_1.0.0.yaml',
        'inputs': inputs,
        'annotation_labels': {
            'aiplatform.googleapis.com/annotation_set_name': 'my_test_saved_query'
        }
    }

    return data_labeling_job
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_dataset_image_sample + parent = parent + + return parent + +def make_dataset(display_name: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.Dataset: + dataset = { + 'display_name': display_name, + 'metadata_schema_uri': 'gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml' + } + + return dataset + diff --git a/.sample_configs/param_handlers/create_dataset_sample.py b/.sample_configs/param_handlers/create_dataset_sample.py new file mode 100644 index 0000000000..aea7f3b5ea --- /dev/null +++ b/.sample_configs/param_handlers/create_dataset_sample.py @@ -0,0 +1,29 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_dataset_sample + parent = parent + + return parent + +def make_dataset(display_name: str, metadata_schema_uri: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.Dataset: + dataset = { + 'display_name': display_name, + 'metadata_schema_uri': metadata_schema_uri, + } + + return dataset + diff --git a/.sample_configs/param_handlers/create_dataset_tabular_bigquery_sample.py b/.sample_configs/param_handlers/create_dataset_tabular_bigquery_sample.py new file mode 100644 index 0000000000..4a56e401f8 --- /dev/null +++ b/.sample_configs/param_handlers/create_dataset_tabular_bigquery_sample.py @@ -0,0 +1,37 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_dataset(display_name: str, bigquery_uri: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.Dataset: + metadata_dict = { + 'input_config': { + 'bigquery_source': { + 'uri': bigquery_uri + } + } + } + metadata = to_protobuf_value(metadata_dict) + + dataset = { + 'display_name': display_name, + 'metadata_schema_uri': "gs://google-cloud-aiplatform/schema/dataset/metadata/tabular_1.0.0.yaml", + 'metadata': metadata + } + + return dataset \ No newline at end of file diff --git a/.sample_configs/param_handlers/create_dataset_tabular_gcs_sample.py b/.sample_configs/param_handlers/create_dataset_tabular_gcs_sample.py new file mode 100644 index 0000000000..8ee3a8f209 --- /dev/null +++ b/.sample_configs/param_handlers/create_dataset_tabular_gcs_sample.py @@ -0,0 +1,37 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_dataset(display_name: str, gcs_uri: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.Dataset: + metadata_dict = { + 'input_config': { + 'gcs_source': { + 'uri': [gcs_uri] + } + } + } + metadata = to_protobuf_value(metadata_dict) + + dataset = { + 'display_name': display_name, + 'metadata_schema_uri': "gs://google-cloud-aiplatform/schema/dataset/metadata/tabular_1.0.0.yaml", + 'metadata': metadata + } + + return dataset \ No newline at end of file diff --git a/.sample_configs/param_handlers/create_dataset_text_sample.py b/.sample_configs/param_handlers/create_dataset_text_sample.py new file mode 100644 index 0000000000..7bdc972548 --- /dev/null +++ b/.sample_configs/param_handlers/create_dataset_text_sample.py @@ -0,0 +1,29 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_dataset_text_sample + parent = parent + + return parent + +def make_dataset(display_name: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.Dataset: + dataset = { + 'display_name': display_name, + 'metadata_schema_uri': 'gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml' + } + + return dataset + diff --git a/.sample_configs/param_handlers/create_dataset_video_sample.py b/.sample_configs/param_handlers/create_dataset_video_sample.py new file mode 100644 index 0000000000..7c14bd1b5c --- /dev/null +++ b/.sample_configs/param_handlers/create_dataset_video_sample.py @@ -0,0 +1,29 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_dataset_video_sample + parent = parent + + return parent + +def make_dataset(display_name: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.Dataset: + dataset = { + 'display_name': display_name, + 'metadata_schema_uri': 'gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml' + } + + return dataset + diff --git a/.sample_configs/param_handlers/create_endpoint_sample.py b/.sample_configs/param_handlers/create_endpoint_sample.py new file mode 100644 index 0000000000..12f850fd9d --- /dev/null +++ b/.sample_configs/param_handlers/create_endpoint_sample.py @@ -0,0 +1,28 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_endpoint_sample + parent = parent + + return parent + +def make_endpoint(display_name: str) -> google.cloud.aiplatform_v1alpha1.types.endpoint.Endpoint: + endpoint = { + 'display_name': display_name + } + + return endpoint + diff --git a/.sample_configs/param_handlers/create_hyperparameter_tuning_job_python_package_sample.py b/.sample_configs/param_handlers/create_hyperparameter_tuning_job_python_package_sample.py new file mode 100644 index 0000000000..1109e3711f --- /dev/null +++ b/.sample_configs/param_handlers/create_hyperparameter_tuning_job_python_package_sample.py @@ -0,0 +1,89 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_hyperparameter_tuning_job_using_python_package_sample + parent = parent + + return parent + + +def make_hyperparameter_tuning_job( + display_name: str, executor_image_uri: str, package_uri: str, python_module: str, +) -> google.cloud.aiplatform_v1beta1.types.hyperparameter_tuning_job.HyperparameterTuningJob: + + # study_spec + metric = { + "metric_id": "val_rmse", + "goal": aiplatform.gapic.StudySpec.MetricSpec.GoalType.MINIMIZE, + } + + conditional_parameter_decay = { + "parameter_spec": { + "parameter_id": "decay", + "double_value_spec": {"min_value": 1e-07, "max_value": 1}, + "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE, + }, + "parent_discrete_values": {"values": [32, 64]}, + } + conditional_parameter_learning_rate = { + "parameter_spec": { + "parameter_id": "learning_rate", + "double_value_spec": {"min_value": 1e-07, "max_value": 1}, + "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE, + }, + "parent_discrete_values": {"values": [4, 8, 16]}, + } + parameter = { + "parameter_id": "batch_size", + "discrete_value_spec": {"values": [4, 8, 16, 32, 64, 128]}, + "scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE, + "conditional_parameter_specs": [ + conditional_parameter_decay, + conditional_parameter_learning_rate, + ], + } + + # trial_job_spec + machine_spec = { + "machine_type": "n1-standard-4", + "accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + "accelerator_count": 1, + } + worker_pool_spec = { + "machine_spec": machine_spec, + "replica_count": 1, + "python_package_spec": { + "executor_image_uri": executor_image_uri, + "package_uris": [package_uri], + "python_module": python_module, + "args": [], + }, + } + + # hyperparameter_tuning_job + hyperparameter_tuning_job = { + "display_name": display_name, + "max_trial_count": 4, + "parallel_trial_count": 
2, + "study_spec": { + "metrics": [metric], + "parameters": [parameter], + "algorithm": aiplatform.gapic.StudySpec.Algorithm.RANDOM_SEARCH, + }, + "trial_job_spec": {"worker_pool_specs": [worker_pool_spec]}, + } + + return hyperparameter_tuning_job diff --git a/.sample_configs/param_handlers/create_hyperparameter_tuning_job_sample.py b/.sample_configs/param_handlers/create_hyperparameter_tuning_job_sample.py new file mode 100644 index 0000000000..257f1931d6 --- /dev/null +++ b/.sample_configs/param_handlers/create_hyperparameter_tuning_job_sample.py @@ -0,0 +1,64 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_hyperparameter_tuning_job_sample + parent = parent + + return parent + +def make_hyperparameter_tuning_job(display_name: str, container_image_uri: str) -> google.cloud.aiplatform_v1alpha1.types.hyperparameter_tuning_job.HyperparameterTuningJob: + hyperparameter_tuning_job = { + 'display_name': display_name, + 'max_trial_count': 2, + 'parallel_trial_count': 1, + 'max_failed_trial_count': 1, + 'study_spec': { + 'metrics': [ + { + 'metric_id': 'accuracy', + 'goal': aiplatform.gapic.StudySpec.MetricSpec.GoalType.MAXIMIZE + } + ], + 'parameters': [ + { + # Learning rate. 
+ 'parameter_id': 'lr', + 'double_value_spec': { + 'min_value': 0.001, + 'max_value': 0.1 + } + }, + ] + }, + 'trial_job_spec': { + 'worker_pool_specs': [{ + 'machine_spec': { + 'machine_type': 'n1-standard-4', + 'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + 'accelerator_count': 1 + }, + 'replica_count': 1, + 'container_spec': { + 'image_uri': container_image_uri, + 'command': [], + 'args': [] + } + }] + } + } + + return hyperparameter_tuning_job + diff --git a/.sample_configs/param_handlers/create_specialist_pool_sample.py b/.sample_configs/param_handlers/create_specialist_pool_sample.py new file mode 100644 index 0000000000..e15fc48142 --- /dev/null +++ b/.sample_configs/param_handlers/create_specialist_pool_sample.py @@ -0,0 +1,28 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_specialist_pool_sample + parent = parent + + return parent + +def make_specialist_pool(display_name: str) -> google.cloud.aiplatform_v1alpha1.types.specialist_pool.SpecialistPool: + specialist_pool = { + 'display_name': display_name + } + + return specialist_pool + diff --git a/.sample_configs/param_handlers/create_training_pipeline_custom_job_sample.py b/.sample_configs/param_handlers/create_training_pipeline_custom_job_sample.py new file mode 100644 index 0000000000..0e3d7a6a62 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_custom_job_sample.py @@ -0,0 +1,65 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_training_pipeline_custom_job_sample + parent = parent + + return parent + + +def make_training_pipeline( + display_name: str, + model_display_name: str, + container_image_uri: str, + base_output_directory_prefix: str, +) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + + training_task_inputs_dict = { + "workerPoolSpecs": [ + { + "replicaCount": 1, + "machineSpec": {"machineType": "n1-standard-4"}, + "containerSpec": { + # A working docker image can be found at gs://cloud-samples-data/ai-platform/mnist_tfrecord/custom_job + "imageUri": container_image_uri, + "args": [ + # AIP_MODEL_DIR is set by the service according to baseOutputDirectory. + "--model_dir=$(AIP_MODEL_DIR)", + ], + }, + } + ], + "baseOutputDirectory": { + # The GCS location for outputs must be accessible by the project's AI Platform service account. + "output_uri_prefix": base_output_directory_prefix + }, + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_task_definition = "gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml" + image_uri = "gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest" + + training_pipeline = { + "display_name": display_name, + "training_task_definition": training_task_definition, + "training_task_inputs": training_task_inputs, + "model_to_upload": { + "display_name": model_display_name, + "container_spec": {"image_uri": image_uri}, + }, + } + + return training_pipeline diff --git a/.sample_configs/param_handlers/create_training_pipeline_custom_training_managed_dataset_sample.py b/.sample_configs/param_handlers/create_training_pipeline_custom_training_managed_dataset_sample.py new file mode 100644 index 0000000000..80da32cbd1 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_custom_training_managed_dataset_sample.py @@ -0,0 +1,80 @@ +# Copyright 2020 Google LLC +# 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_training_pipeline_custom_training_with_managed_dataset_sample + parent = parent + + return parent + + +def make_training_pipeline( + display_name: str, + model_display_name: str, + dataset_id: str, + annotation_schema_uri: str, + training_container_spec_image_uri: str, + model_container_spec_image_uri: str, + base_output_uri_prefix: str, +) -> google.cloud.aiplatform_v1beta1.types.training_pipeline.TrainingPipeline: + + # input_data_config + input_data_config = { + "dataset_id": dataset_id, + "annotation_schema_uri": annotation_schema_uri, + "gcs_destination": {"output_uri_prefix": base_output_uri_prefix}, + } + + # training_task_definition + custom_task_definition = "gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml" + + # training_task_inputs + training_container_spec = { + "imageUri": training_container_spec_image_uri, + # AIP_MODEL_DIR is set by the service according to baseOutputDirectory. 
+ "args": ["--model-dir=$(AIP_MODEL_DIR)"], + } + + training_worker_pool_spec = { + "replicaCount": 1, + "machineSpec": {"machineType": "n1-standard-8"}, + "containerSpec": training_container_spec, + } + + training_task_inputs_dict = { + "workerPoolSpecs": [training_worker_pool_spec], + "baseOutputDirectory": {"outputUriPrefix": base_output_uri_prefix}, + } + + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + # model_to_upload + model_container_spec = { + "image_uri": model_container_spec_image_uri, + "command": [], + "args": [], + } + + model = {"display_name": model_display_name, "container_spec": model_container_spec} + + training_pipeline = { + "display_name": display_name, + "input_data_config": input_data_config, + "training_task_definition": custom_task_definition, + "training_task_inputs": training_task_inputs, + "model_to_upload": model, + } + + return training_pipeline diff --git a/.sample_configs/param_handlers/create_training_pipeline_image_classification_sample.py b/.sample_configs/param_handlers/create_training_pipeline_image_classification_sample.py new file mode 100644 index 0000000000..34e50004a9 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_image_classification_sample.py @@ -0,0 +1,43 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = { + "multiLabel": True, + "modelType": "CLOUD", + "budgetMilliNodeHours": 8000, + "disableEarlyStopping": False + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_classification_1.0.0.yaml", + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_image_object_detection_sample.py b/.sample_configs/param_handlers/create_training_pipeline_image_object_detection_sample.py new file mode 100644 index 0000000000..99907515a6 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_image_object_detection_sample.py @@ -0,0 +1,42 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = { + "modelType": "CLOUD_HIGH_ACCURACY_1", + "budgetMilliNodeHours": 20000, + "disableEarlyStopping": False + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_object_detection_1.0.0.yaml", + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_sample.py b/.sample_configs/param_handlers/create_training_pipeline_sample.py new file mode 100644 index 0000000000..20deb73576 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_sample.py @@ -0,0 +1,44 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_training_pipeline_sample + parent = parent + + return parent + +def make_training_pipeline(display_name: str, training_task_definition: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = { + "multiLabel": True, + "modelType": "CLOUD", + "budgetMilliNodeHours": 8000, + "disableEarlyStopping": False + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': training_task_definition, + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_tabular_classification_sample.py b/.sample_configs/param_handlers/create_training_pipeline_tabular_classification_sample.py new file mode 100644 index 0000000000..90c06bce1b --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_tabular_classification_sample.py @@ -0,0 +1,72 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str, target_column: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + # set the columns used for training and their data types + transformations = [ + {"auto": {"column_name": "sepal_width"}}, + {"auto": {"column_name": "sepal_length"}}, + {"auto": {"column_name": "petal_length"}}, + {"auto": {"column_name": "petal_width"}} + ] + + training_task_inputs_dict = { + # required inputs + "targetColumn": target_column, + "predictionType": "classification", + "transformations": transformations, + "trainBudgetMilliNodeHours": 8000, + + # optional inputs + "disableEarlyStopping": False, + # supported binary classification optimisation objectives: + # maximize-au-roc, minimize-log-loss, maximize-au-prc, + # maximize-precision-at-recall, maximize-recall-at-precision + # supported multi-class classification optimisation objective: + # minimize-log-loss + "optimizationObjective": "minimize-log-loss", + + # possibly required inputs + # required when using maximize-precision-at-recall + # "optimizationObjectiveRecallValue": 0.5, # 0.0 - 1.0 + # required when using maximize-recall-at-precision + # "optimizationObjectivePrecisionValue": 0.5, # 0.0 - 1.0 + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tabular_1.0.0.yaml", + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id, + 'fraction_split': { + 'training_fraction': 0.8, + 'validation_fraction': 0.1, + 'test_fraction': 0.1, + } + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git 
a/.sample_configs/param_handlers/create_training_pipeline_tabular_regression_sample.py b/.sample_configs/param_handlers/create_training_pipeline_tabular_regression_sample.py new file mode 100644 index 0000000000..9998072c97 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_tabular_regression_sample.py @@ -0,0 +1,77 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str, target_column: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + # set the columns used for training and their data types + transformations = [ + {"auto": {"column_name": "STRING_5000unique_NULLABLE"}}, + {"auto": {"column_name": "INTEGER_5000unique_NULLABLE"}}, + {"auto": {"column_name": "FLOAT_5000unique_NULLABLE"}}, + {"auto": {"column_name": "FLOAT_5000unique_REPEATED"}}, + {"auto": {"column_name": "NUMERIC_5000unique_NULLABLE"}}, + {"auto": {"column_name": "BOOLEAN_2unique_NULLABLE"}}, + {"timestamp": {"column_name": "TIMESTAMP_1unique_NULLABLE", "invalid_values_allowed": True}}, + {"auto": {"column_name": "DATE_1unique_NULLABLE"}}, + {"auto": {"column_name": "TIME_1unique_NULLABLE"}}, + {"timestamp": {"column_name": "DATETIME_1unique_NULLABLE", "invalid_values_allowed": True}}, + {"auto": {"column_name": 
"STRUCT_NULLABLE.STRING_5000unique_NULLABLE"}}, + {"auto": {"column_name": "STRUCT_NULLABLE.INTEGER_5000unique_NULLABLE"}}, + {"auto": {"column_name": "STRUCT_NULLABLE.FLOAT_5000unique_NULLABLE"}}, + {"auto": {"column_name": "STRUCT_NULLABLE.FLOAT_5000unique_REQUIRED"}}, + {"auto": {"column_name": "STRUCT_NULLABLE.FLOAT_5000unique_REPEATED"}}, + {"auto": {"column_name": "STRUCT_NULLABLE.NUMERIC_5000unique_NULLABLE"}}, + {"auto": {"column_name": "STRUCT_NULLABLE.BOOLEAN_2unique_NULLABLE"}}, + {"auto": {"column_name": "STRUCT_NULLABLE.TIMESTAMP_1unique_NULLABLE"}} + ] + + training_task_inputs_dict = { + # required inputs + "targetColumn": target_column, + "predictionType": "regression", + "transformations": transformations, + "trainBudgetMilliNodeHours": 8000, + + # optional inputs + "disableEarlyStopping": False, + # supported regression optimisation objectives: minimize-rmse, + # minimize-mae, minimize-rmsle + "optimizationObjective": "minimize-rmse", + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tabular_1.0.0.yaml", + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id, + 'fraction_split': { + 'training_fraction': 0.8, + 'validation_fraction': 0.1, + 'test_fraction': 0.1, + } + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_text_classification_sample.py b/.sample_configs/param_handlers/create_training_pipeline_text_classification_sample.py new file mode 100644 index 0000000000..faa5c52af5 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_text_classification_sample.py @@ -0,0 +1,36 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = {} + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_classification_1.0.0.yaml", + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_text_entity_extraction_sample.py b/.sample_configs/param_handlers/create_training_pipeline_text_entity_extraction_sample.py new file mode 100644 index 0000000000..569d6051b3 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_text_entity_extraction_sample.py @@ -0,0 +1,37 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = {} + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_extraction_1.0.0.yaml", + # Training task inputs are empty for text entity extraction + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py b/.sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py new file mode 100644 index 0000000000..db597c6611 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py @@ -0,0 +1,37 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + # Use sentiment_max of 4 + training_task_inputs_dict = {"sentiment_max": 4} + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_sentiment_1.0.0.yaml", + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_video_action_recognition_sample.py b/.sample_configs/param_handlers/create_training_pipeline_video_action_recognition_sample.py new file mode 100644 index 0000000000..df3148f82d --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_video_action_recognition_sample.py @@ -0,0 +1,40 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_training_pipeline_video_action_recognition_sample + parent = parent + + return parent + + +def make_training_pipeline( + display_name: str, dataset_id: str, model_display_name: str, model_type: str +) -> google.cloud.aiplatform_v1beta1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = { + # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1' + "modelType": model_type + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + "display_name": display_name, + "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_action_recognition_1.0.0.yaml", + "training_task_inputs": training_task_inputs, + "input_data_config": {"dataset_id": dataset_id}, + "model_to_upload": {"display_name": model_display_name}, + } + + return training_pipeline diff --git a/.sample_configs/param_handlers/create_training_pipeline_video_classification_sample.py b/.sample_configs/param_handlers/create_training_pipeline_video_classification_sample.py new file mode 100644 index 0000000000..6c695979fe --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_video_classification_sample.py @@ -0,0 +1,40 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_training_pipeline_video_classification_sample + parent = parent + + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = {} + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_classification_1.0.0.yaml", + # Training task inputs are empty for video classification + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/create_training_pipeline_video_object_tracking_sample.py b/.sample_configs/param_handlers/create_training_pipeline_video_object_tracking_sample.py new file mode 100644 index 0000000000..374a2c0e91 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_video_object_tracking_sample.py @@ -0,0 +1,41 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in create_training_pipeline_video_object_tracking_sample + parent = parent + + return parent + +def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + training_task_inputs_dict = { + "modelType": "CLOUD" + } + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + 'display_name': display_name, + 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_object_tracking_1.0.0.yaml", + 'training_task_inputs': training_task_inputs, + 'input_data_config': { + 'dataset_id': dataset_id + }, + 'model_to_upload': { + 'display_name': model_display_name + } + } + + return training_pipeline + diff --git a/.sample_configs/param_handlers/delete_annotation_sample.py b/.sample_configs/param_handlers/delete_annotation_sample.py new file mode 100644 index 0000000000..06927a77b7 --- /dev/null +++ b/.sample_configs/param_handlers/delete_annotation_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_annotation_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_annotation_spec_sample.py b/.sample_configs/param_handlers/delete_annotation_spec_sample.py new file mode 100644 index 0000000000..f10c564431 --- /dev/null +++ b/.sample_configs/param_handlers/delete_annotation_spec_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_annotation_spec_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_batch_prediction_job_sample.py b/.sample_configs/param_handlers/delete_batch_prediction_job_sample.py new file mode 100644 index 0000000000..3cf160f6eb --- /dev/null +++ b/.sample_configs/param_handlers/delete_batch_prediction_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_batch_prediction_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_custom_job_sample.py b/.sample_configs/param_handlers/delete_custom_job_sample.py new file mode 100644 index 0000000000..023e032c57 --- /dev/null +++ b/.sample_configs/param_handlers/delete_custom_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_custom_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_data_item_sample.py b/.sample_configs/param_handlers/delete_data_item_sample.py new file mode 100644 index 0000000000..37b93503e9 --- /dev/null +++ b/.sample_configs/param_handlers/delete_data_item_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_data_item_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_data_labeling_job_sample.py b/.sample_configs/param_handlers/delete_data_labeling_job_sample.py new file mode 100644 index 0000000000..bb54e9f9fe --- /dev/null +++ b/.sample_configs/param_handlers/delete_data_labeling_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_data_labeling_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_dataset_sample.py b/.sample_configs/param_handlers/delete_dataset_sample.py new file mode 100644 index 0000000000..f904895071 --- /dev/null +++ b/.sample_configs/param_handlers/delete_dataset_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_dataset_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_endpoint_sample.py b/.sample_configs/param_handlers/delete_endpoint_sample.py new file mode 100644 index 0000000000..9581754e2b --- /dev/null +++ b/.sample_configs/param_handlers/delete_endpoint_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_endpoint_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_hyperparameter_tuning_job_sample.py b/.sample_configs/param_handlers/delete_hyperparameter_tuning_job_sample.py new file mode 100644 index 0000000000..862b496d73 --- /dev/null +++ b/.sample_configs/param_handlers/delete_hyperparameter_tuning_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_hyperparameter_tuning_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_model_sample.py b/.sample_configs/param_handlers/delete_model_sample.py new file mode 100644 index 0000000000..c914f4ea9a --- /dev/null +++ b/.sample_configs/param_handlers/delete_model_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_model_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_specialist_pool_sample.py b/.sample_configs/param_handlers/delete_specialist_pool_sample.py new file mode 100644 index 0000000000..8f31fdd204 --- /dev/null +++ b/.sample_configs/param_handlers/delete_specialist_pool_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_specialist_pool_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/delete_training_pipeline_sample.py b/.sample_configs/param_handlers/delete_training_pipeline_sample.py new file mode 100644 index 0000000000..bc02c10fd9 --- /dev/null +++ b/.sample_configs/param_handlers/delete_training_pipeline_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in delete_training_pipeline_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/deploy_model_custom_trained_model_sample.py b/.sample_configs/param_handlers/deploy_model_custom_trained_model_sample.py new file mode 100644 index 0000000000..f5071f2209 --- /dev/null +++ b/.sample_configs/param_handlers/deploy_model_custom_trained_model_sample.py @@ -0,0 +1,48 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + + return endpoint + +def make_deployed_model(model_name: str, deployed_model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.endpoint.DeployedModel: + deployed_model = { + # format: 'projects/{project}/locations/{location}/models/{model}' + 'model': model_name, + 'display_name': deployed_model_display_name, + + # `dedicated_resources` must be used for non-AutoML models + 'dedicated_resources': { + 'min_replica_count': 1, + 'machine_spec': { + 'machine_type': 'n1-standard-2', + # Accelerators can be used only if the model specifies a GPU image. + # 'accelerator_type': aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, + # 'accelerator_count': 1, + } + } + } + + return deployed_model + +def make_traffic_split() -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.endpoint_service.DeployModelRequest.TrafficSplitEntry]: + # key '0' assigns traffic for the newly deployed model + # Traffic percentage values must add up to 100 + # Leave dictionary empty if endpoint should not accept any traffic + traffic_split = {'0': 100} + + return traffic_split + diff --git a/.sample_configs/param_handlers/deploy_model_sample.py b/.sample_configs/param_handlers/deploy_model_sample.py new file mode 100644 index 0000000000..eb615c989f --- /dev/null +++ b/.sample_configs/param_handlers/deploy_model_sample.py @@ -0,0 +1,43 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + + return endpoint + +def make_deployed_model(model_name: str, deployed_model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.endpoint.DeployedModel: + deployed_model = { + # format: 'projects/{project}/locations/{location}/models/{model}' + 'model': model_name, + 'display_name': deployed_model_display_name, + # AutoML Vision models require `automatic_resources` field + # Other model types may require `dedicated_resources` field instead + 'automatic_resources': { + 'min_replica_count': 1, + 'max_replica_count': 1 + } + } + + return deployed_model + +def make_traffic_split() -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.endpoint_service.DeployModelRequest.TrafficSplitEntry]: + # key '0' assigns traffic for the newly deployed model + # Traffic percentage values must add up to 100 + # Leave dictionary empty if endpoint should not accept any traffic + traffic_split = {'0': 100} + + return traffic_split + diff --git a/.sample_configs/param_handlers/explain_custom_image_sample.py b/.sample_configs/param_handlers/explain_custom_image_sample.py new file mode 100644 index 0000000000..497e3ef164 --- /dev/null +++ b/.sample_configs/param_handlers/explain_custom_image_sample.py @@ -0,0 +1,38 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + # The format of each instance should conform to the deployed model's prediction input schema. + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + # custom models do not have additional parameters + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + +def make_deployed_model_id(): + # This works when there is only one deployed model. + deployed_model_id = None + return deployed_model_id diff --git a/.sample_configs/param_handlers/explain_custom_tabular_sample.py b/.sample_configs/param_handlers/explain_custom_tabular_sample.py new file mode 100644 index 0000000000..497e3ef164 --- /dev/null +++ b/.sample_configs/param_handlers/explain_custom_tabular_sample.py @@ -0,0 +1,38 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + # The format of each instance should conform to the deployed model's prediction input schema. + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + # custom models do not have additional parameters + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + +def make_deployed_model_id(): + # This works when there is only one deployed model. + deployed_model_id = None + return deployed_model_id diff --git a/.sample_configs/param_handlers/explain_sample.py b/.sample_configs/param_handlers/explain_sample.py new file mode 100644 index 0000000000..b4090cbb43 --- /dev/null +++ b/.sample_configs/param_handlers/explain_sample.py @@ -0,0 +1,39 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint: str) -> str: + # Sample function parameter endpoint in explain_sample + endpoint = endpoint + + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + +def make_deployed_model_id(deployed_model_id: str) -> str: + # Sample function parameter deployed_model_id in explain_sample + deployed_model_id = deployed_model_id + + return deployed_model_id + diff --git a/.sample_configs/param_handlers/explain_tabular_sample.py b/.sample_configs/param_handlers/explain_tabular_sample.py new file mode 100644 index 0000000000..b2f9436a85 --- /dev/null +++ b/.sample_configs/param_handlers/explain_tabular_sample.py @@ -0,0 +1,38 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + # The format of each instance should conform to the deployed model's prediction input schema. 
+ instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + # tabular models do not have additional parameters + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + +def make_deployed_model_id(): + # This works when there is only one deployed model. + deployed_model_id = None + return deployed_model_id diff --git a/.sample_configs/param_handlers/export_data_sample.py b/.sample_configs/param_handlers/export_data_sample.py new file mode 100644 index 0000000000..8d0236c5c9 --- /dev/null +++ b/.sample_configs/param_handlers/export_data_sample.py @@ -0,0 +1,27 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_export_config(gcs_destination_output_uri_prefix: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.ExportDataConfig: + export_config = { + 'gcs_destination': { + 'output_uri_prefix': gcs_destination_output_uri_prefix + } + } + + return export_config + diff --git a/.sample_configs/param_handlers/export_evaluated_data_items_sample.py b/.sample_configs/param_handlers/export_evaluated_data_items_sample.py new file mode 100644 index 0000000000..38fdd68a59 --- /dev/null +++ b/.sample_configs/param_handlers/export_evaluated_data_items_sample.py @@ -0,0 +1,20 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_model_evaluation(name: str) -> str: + model_evaluation = name + + return model_evaluation + diff --git a/.sample_configs/param_handlers/export_model_sample.py b/.sample_configs/param_handlers/export_model_sample.py new file mode 100644 index 0000000000..c096b20d45 --- /dev/null +++ b/.sample_configs/param_handlers/export_model_sample.py @@ -0,0 +1,30 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in export_model_sample + name = name + + return name + +def make_output_config(gcs_destination_output_uri_prefix: str) -> google.cloud.aiplatform_v1alpha1.types.model_service.ExportModelRequest.OutputConfig: + output_config = { + 'artifact_destination': { + 'output_uri_prefix': gcs_destination_output_uri_prefix + } + } + + return output_config + diff --git a/.sample_configs/param_handlers/export_model_tabular_classification_sample.py b/.sample_configs/param_handlers/export_model_tabular_classification_sample.py new file mode 100644 index 0000000000..fec6655664 --- /dev/null +++ b/.sample_configs/param_handlers/export_model_tabular_classification_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+def make_name(name: str) -> str:
+    # Sample function parameter name in export_model_tabular_classification_sample
+    name = name
+
+    return name
+
+def make_output_config(gcs_destination_output_uri_prefix: str) -> google.cloud.aiplatform_v1alpha1.types.model_service.ExportModelRequest.OutputConfig:
+    gcs_destination = {
+        'output_uri_prefix': gcs_destination_output_uri_prefix
+    }
+    output_config = {
+        'artifact_destination': gcs_destination,
+        'export_format_id': 'tf-saved-model'
+    }
+
+    return output_config
+
diff --git a/.sample_configs/param_handlers/export_model_video_action_recognition_sample.py b/.sample_configs/param_handlers/export_model_video_action_recognition_sample.py
new file mode 100644
index 0000000000..a562e37e4f
--- /dev/null
+++ b/.sample_configs/param_handlers/export_model_video_action_recognition_sample.py
@@ -0,0 +1,33 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def make_name(name: str) -> str:
+    # Sample function parameter name in export_model_video_action_recognition_sample
+    name = name
+
+    return name
+
+
+def make_output_config(
+    gcs_destination_output_uri_prefix: str,
+    export_format: str
+) -> google.cloud.aiplatform_v1beta1.types.model_service.ExportModelRequest.OutputConfig:
+    gcs_destination = {"output_uri_prefix": gcs_destination_output_uri_prefix}
+    output_config = {
+        "artifact_destination": gcs_destination,
+        "export_format_id": export_format,
+    }
+
+    return output_config
diff --git a/.sample_configs/param_handlers/get_annotation_sample.py b/.sample_configs/param_handlers/get_annotation_sample.py
new file mode 100644
index 0000000000..6f61d85d71
--- /dev/null
+++ b/.sample_configs/param_handlers/get_annotation_sample.py
@@ -0,0 +1,21 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def make_name(name: str) -> str:
+    # Sample function parameter name in get_annotation_sample
+    name = name
+
+    return name
+
diff --git a/.sample_configs/param_handlers/get_annotation_spec_sample.py b/.sample_configs/param_handlers/get_annotation_spec_sample.py
new file mode 100644
index 0000000000..ff0b8cf1ba
--- /dev/null
+++ b/.sample_configs/param_handlers/get_annotation_spec_sample.py
@@ -0,0 +1,21 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_annotation_spec_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_batch_prediction_job_sample.py b/.sample_configs/param_handlers/get_batch_prediction_job_sample.py new file mode 100644 index 0000000000..8a969ece15 --- /dev/null +++ b/.sample_configs/param_handlers/get_batch_prediction_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in get_batch_prediction_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_custom_job_sample.py b/.sample_configs/param_handlers/get_custom_job_sample.py new file mode 100644 index 0000000000..6b6ec80b19 --- /dev/null +++ b/.sample_configs/param_handlers/get_custom_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_custom_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_data_item_sample.py b/.sample_configs/param_handlers/get_data_item_sample.py new file mode 100644 index 0000000000..50ee380889 --- /dev/null +++ b/.sample_configs/param_handlers/get_data_item_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in get_data_item_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_data_labeling_job_sample.py b/.sample_configs/param_handlers/get_data_labeling_job_sample.py new file mode 100644 index 0000000000..dad5bc8f12 --- /dev/null +++ b/.sample_configs/param_handlers/get_data_labeling_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_data_labeling_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_dataset_sample.py b/.sample_configs/param_handlers/get_dataset_sample.py new file mode 100644 index 0000000000..36498ecd4e --- /dev/null +++ b/.sample_configs/param_handlers/get_dataset_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in get_dataset_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_endpoint_sample.py b/.sample_configs/param_handlers/get_endpoint_sample.py new file mode 100644 index 0000000000..67f2c26bbf --- /dev/null +++ b/.sample_configs/param_handlers/get_endpoint_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_endpoint_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_hyperparameter_tuning_job_sample.py b/.sample_configs/param_handlers/get_hyperparameter_tuning_job_sample.py new file mode 100644 index 0000000000..e42e006fa9 --- /dev/null +++ b/.sample_configs/param_handlers/get_hyperparameter_tuning_job_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_hyperparameter_tuning_job_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_entity_extraction_sample.py b/.sample_configs/param_handlers/get_model_evaluation_entity_extraction_sample.py new file mode 100644 index 0000000000..d79a3c35bf --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_entity_extraction_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_entity_extraction_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_image_classification_sample.py b/.sample_configs/param_handlers/get_model_evaluation_image_classification_sample.py new file mode 100644 index 0000000000..3f0779c7ec --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_image_classification_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_image_classification_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_image_object_detection_sample.py b/.sample_configs/param_handlers/get_model_evaluation_image_object_detection_sample.py new file mode 100644 index 0000000000..d30120fa00 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_image_object_detection_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_image_object_detection_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_sample.py b/.sample_configs/param_handlers/get_model_evaluation_sample.py new file mode 100644 index 0000000000..7c5b71ab64 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_sentiment_analysis_sample.py b/.sample_configs/param_handlers/get_model_evaluation_sentiment_analysis_sample.py new file mode 100644 index 0000000000..210acb165a --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_sentiment_analysis_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_sentiment_analysis_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_slice_sample.py b/.sample_configs/param_handlers/get_model_evaluation_slice_sample.py new file mode 100644 index 0000000000..3d2ae6f35d --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_slice_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_slice_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_tabular_classification_sample.py b/.sample_configs/param_handlers/get_model_evaluation_tabular_classification_sample.py new file mode 100644 index 0000000000..79ffd458f7 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_tabular_classification_sample.py @@ -0,0 +1,20 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_tabular_regression_sample.py b/.sample_configs/param_handlers/get_model_evaluation_tabular_regression_sample.py new file mode 100644 index 0000000000..79ffd458f7 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_tabular_regression_sample.py @@ -0,0 +1,20 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_text_classification_sample.py b/.sample_configs/param_handlers/get_model_evaluation_text_classification_sample.py new file mode 100644 index 0000000000..f6b62d0b64 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_text_classification_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_text_classification_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_text_entity_extraction_sample.py b/.sample_configs/param_handlers/get_model_evaluation_text_entity_extraction_sample.py new file mode 100644 index 0000000000..9eb19f37a1 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_text_entity_extraction_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_text_entity_extraction_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_text_sentiment_analysis_sample.py b/.sample_configs/param_handlers/get_model_evaluation_text_sentiment_analysis_sample.py new file mode 100644 index 0000000000..6ec1e9f1ce --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_text_sentiment_analysis_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_text_sentiment_analysis_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_video_action_recognition_sample.py b/.sample_configs/param_handlers/get_model_evaluation_video_action_recognition_sample.py new file mode 100644 index 0000000000..396e6bb945 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_video_action_recognition_sample.py @@ -0,0 +1,20 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_video_action_recognition_sample + name = name + + return name diff --git a/.sample_configs/param_handlers/get_model_evaluation_video_classification_sample.py b/.sample_configs/param_handlers/get_model_evaluation_video_classification_sample.py new file mode 100644 index 0000000000..270fa0963c --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_video_classification_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_video_classification_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_evaluation_video_object_tracking_sample.py b/.sample_configs/param_handlers/get_model_evaluation_video_object_tracking_sample.py new file mode 100644 index 0000000000..c07124fc02 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_evaluation_video_object_tracking_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_evaluation_video_object_tracking_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_model_sample.py b/.sample_configs/param_handlers/get_model_sample.py new file mode 100644 index 0000000000..c293a92b24 --- /dev/null +++ b/.sample_configs/param_handlers/get_model_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_model_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_specialist_pool_sample.py b/.sample_configs/param_handlers/get_specialist_pool_sample.py new file mode 100644 index 0000000000..ca3a1bad2f --- /dev/null +++ b/.sample_configs/param_handlers/get_specialist_pool_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_specialist_pool_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/get_training_pipeline_sample.py b/.sample_configs/param_handlers/get_training_pipeline_sample.py new file mode 100644 index 0000000000..18cc8a6fdc --- /dev/null +++ b/.sample_configs/param_handlers/get_training_pipeline_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_name(name: str) -> str: + # Sample function parameter name in get_training_pipeline_sample + name = name + + return name + diff --git a/.sample_configs/param_handlers/import_data_image_classification_single_label_sample.py b/.sample_configs/param_handlers/import_data_image_classification_single_label_sample.py new file mode 100644 index 0000000000..b27346394b --- /dev/null +++ b/.sample_configs/param_handlers/import_data_image_classification_single_label_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml", + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/import_data_image_object_detection_sample.py b/.sample_configs/param_handlers/import_data_image_object_detection_sample.py new file mode 100644 index 0000000000..b21f18a799 --- /dev/null +++ b/.sample_configs/param_handlers/import_data_image_object_detection_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_bounding_box_io_format_1.0.0.yaml", + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/import_data_sample.py b/.sample_configs/param_handlers/import_data_sample.py new file mode 100644 index 0000000000..b4abcb6505 --- /dev/null +++ b/.sample_configs/param_handlers/import_data_sample.py @@ -0,0 +1,33 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str, import_schema_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + # Here we use only one import config with one source + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": import_schema_uri, + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/import_data_text_classification_single_label_sample.py b/.sample_configs/param_handlers/import_data_text_classification_single_label_sample.py new file mode 100644 index 0000000000..99863782db --- /dev/null +++ b/.sample_configs/param_handlers/import_data_text_classification_single_label_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_classification_single_label_io_format_1.0.0.yaml", + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/import_data_text_entity_extraction_sample.py b/.sample_configs/param_handlers/import_data_text_entity_extraction_sample.py new file mode 100644 index 0000000000..b09001e3ac --- /dev/null +++ b/.sample_configs/param_handlers/import_data_text_entity_extraction_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_extraction_io_format_1.0.0.yaml" + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/import_data_text_sentiment_analysis_sample.py b/.sample_configs/param_handlers/import_data_text_sentiment_analysis_sample.py new file mode 100644 index 0000000000..ededd4472e --- /dev/null +++ b/.sample_configs/param_handlers/import_data_text_sentiment_analysis_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_sentiment_io_format_1.0.0.yaml" + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/import_data_video_action_recognition_sample.py b/.sample_configs/param_handlers/import_data_video_action_recognition_sample.py new file mode 100644 index 0000000000..9573d3bff5 --- /dev/null +++ b/.sample_configs/param_handlers/import_data_video_action_recognition_sample.py @@ -0,0 +1,33 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + # Sample function parameter name in import_data_video_action_recognition_sample + name = name + + return name + + +def make_import_configs( + gcs_source_uri: str, +) -> typing.Sequence[google.cloud.aiplatform_v1beta1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": {"uris": [gcs_source_uri]}, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/video_action_recognition_io_format_1.0.0.yaml", + } + ] + + return import_configs diff --git a/.sample_configs/param_handlers/import_data_video_classification_sample.py b/.sample_configs/param_handlers/import_data_video_classification_sample.py new file mode 100644 index 0000000000..fb5900e4bb --- /dev/null +++ b/.sample_configs/param_handlers/import_data_video_classification_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/video_classification_io_format_1.0.0.yaml", + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/import_data_video_object_tracking_sample.py b/.sample_configs/param_handlers/import_data_video_object_tracking_sample.py new file mode 100644 index 0000000000..40e4298aba --- /dev/null +++ b/.sample_configs/param_handlers/import_data_video_object_tracking_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_name(name: str) -> str: + return name + +def make_import_configs(gcs_source_uri: str) -> typing.Sequence[google.cloud.aiplatform_v1alpha1.types.dataset.ImportDataConfig]: + import_configs = [ + { + "gcs_source": { + "uris": [ + gcs_source_uri + ] + }, + "import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/video_object_tracking_io_format_1.0.0.yaml", + } + ] + + return import_configs + diff --git a/.sample_configs/param_handlers/list_annotation_specs_sample.py b/.sample_configs/param_handlers/list_annotation_specs_sample.py new file mode 100644 index 0000000000..1c7d24f02d --- /dev/null +++ b/.sample_configs/param_handlers/list_annotation_specs_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_annotation_specs_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_annotations_sample.py b/.sample_configs/param_handlers/list_annotations_sample.py new file mode 100644 index 0000000000..cd8f117a85 --- /dev/null +++ b/.sample_configs/param_handlers/list_annotations_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_annotations_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_batch_prediction_jobs_sample.py b/.sample_configs/param_handlers/list_batch_prediction_jobs_sample.py new file mode 100644 index 0000000000..260663d5bd --- /dev/null +++ b/.sample_configs/param_handlers/list_batch_prediction_jobs_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_batch_prediction_jobs_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_custom_jobs_sample.py b/.sample_configs/param_handlers/list_custom_jobs_sample.py new file mode 100644 index 0000000000..467de2cc16 --- /dev/null +++ b/.sample_configs/param_handlers/list_custom_jobs_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_custom_jobs_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_data_items_sample.py b/.sample_configs/param_handlers/list_data_items_sample.py new file mode 100644 index 0000000000..d831fe7a28 --- /dev/null +++ b/.sample_configs/param_handlers/list_data_items_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_data_items_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_data_labeling_jobs_sample.py b/.sample_configs/param_handlers/list_data_labeling_jobs_sample.py new file mode 100644 index 0000000000..262b8ef6e9 --- /dev/null +++ b/.sample_configs/param_handlers/list_data_labeling_jobs_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_data_labeling_jobs_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_datasets_sample.py b/.sample_configs/param_handlers/list_datasets_sample.py new file mode 100644 index 0000000000..9a81bb3ad6 --- /dev/null +++ b/.sample_configs/param_handlers/list_datasets_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_datasets_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_endpoints_sample.py b/.sample_configs/param_handlers/list_endpoints_sample.py new file mode 100644 index 0000000000..e6782b4916 --- /dev/null +++ b/.sample_configs/param_handlers/list_endpoints_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_endpoints_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_hyperparameter_tuning_jobs_sample.py b/.sample_configs/param_handlers/list_hyperparameter_tuning_jobs_sample.py new file mode 100644 index 0000000000..adc069850d --- /dev/null +++ b/.sample_configs/param_handlers/list_hyperparameter_tuning_jobs_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_hyperparameter_tuning_jobs_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_model_evaluation_slices_sample.py b/.sample_configs/param_handlers/list_model_evaluation_slices_sample.py new file mode 100644 index 0000000000..89916f8568 --- /dev/null +++ b/.sample_configs/param_handlers/list_model_evaluation_slices_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_model_evaluation_slices_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_model_evaluations_sample.py b/.sample_configs/param_handlers/list_model_evaluations_sample.py new file mode 100644 index 0000000000..f8f015c00c --- /dev/null +++ b/.sample_configs/param_handlers/list_model_evaluations_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_model_evaluations_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_models_sample.py b/.sample_configs/param_handlers/list_models_sample.py new file mode 100644 index 0000000000..28e005f894 --- /dev/null +++ b/.sample_configs/param_handlers/list_models_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_models_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_specialist_pools_sample.py b/.sample_configs/param_handlers/list_specialist_pools_sample.py new file mode 100644 index 0000000000..8e990bd028 --- /dev/null +++ b/.sample_configs/param_handlers/list_specialist_pools_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_specialist_pools_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/list_training_pipelines_sample.py b/.sample_configs/param_handlers/list_training_pipelines_sample.py new file mode 100644 index 0000000000..9b7fe458d4 --- /dev/null +++ b/.sample_configs/param_handlers/list_training_pipelines_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in list_training_pipelines_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/predict_custom_trained_model_sample.py b/.sample_configs/param_handlers/predict_custom_trained_model_sample.py new file mode 100644 index 0000000000..32ddf2b49a --- /dev/null +++ b/.sample_configs/param_handlers/predict_custom_trained_model_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + # The format of each instance should conform to the deployed model's prediction input schema. + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_image_classification_sample.py b/.sample_configs/param_handlers/predict_image_classification_sample.py new file mode 100644 index 0000000000..ca8f00dc13 --- /dev/null +++ b/.sample_configs/param_handlers/predict_image_classification_sample.py @@ -0,0 +1,41 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(filename: str) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + content = read_file(filename) + + # The format of each instance should conform to the deployed model's prediction input schema. 
+ encoded_content = b64_encode(content) + instance_dict = {'content': encoded_content} + + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + # See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters. + parameters_dict = { + "confidence_threshold": 0.5, + "max_predictions": 5 + } + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_image_file_sample.py b/.sample_configs/param_handlers/predict_image_file_sample.py new file mode 100644 index 0000000000..c291b2c32d --- /dev/null +++ b/.sample_configs/param_handlers/predict_image_file_sample.py @@ -0,0 +1,38 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(filename: str) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + content = read_file(filename) + + # The format of each instance should conform to the deployed model's prediction input schema. 
+ encoded_content = b64_encode(content) + instance_dict = {'content': encoded_content} + + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + # See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters. + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_image_object_detection_sample.py b/.sample_configs/param_handlers/predict_image_object_detection_sample.py new file mode 100644 index 0000000000..975558e1ab --- /dev/null +++ b/.sample_configs/param_handlers/predict_image_object_detection_sample.py @@ -0,0 +1,41 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(filename: str) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + content = read_file(filename) + + # The format of each instance should conform to the deployed model's prediction input schema. 
+ encoded_content = b64_encode(content) + instance_dict = {'content': encoded_content} + + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + # See gs://google-cloud-aiplatform/schema/predict/params/image_object_detection_1.0.0.yaml for the format of the parameters. + parameters_dict = { + "confidence_threshold": 0.5, + "max_predictions": 5 + } + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_sample.py b/.sample_configs/param_handlers/predict_sample.py new file mode 100644 index 0000000000..d88d3d9be4 --- /dev/null +++ b/.sample_configs/param_handlers/predict_sample.py @@ -0,0 +1,31 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_tabular_classification_sample.py b/.sample_configs/param_handlers/predict_tabular_classification_sample.py new file mode 100644 index 0000000000..5c7dd96408 --- /dev/null +++ b/.sample_configs/param_handlers/predict_tabular_classification_sample.py @@ -0,0 +1,33 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + # for more info on the instance schema, please use get_model_sample.py + # and look at the yaml found in instance_schema_uri + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_tabular_regression_sample.py b/.sample_configs/param_handlers/predict_tabular_regression_sample.py new file mode 100644 index 0000000000..5c7dd96408 --- /dev/null +++ b/.sample_configs/param_handlers/predict_tabular_regression_sample.py @@ -0,0 +1,33 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + # for more info on the instance schema, please use get_model_sample.py + # and look at the yaml found in instance_schema_uri + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_text_classification_single_label_sample.py b/.sample_configs/param_handlers/predict_text_classification_single_label_sample.py new file mode 100644 index 0000000000..9c55451bee --- /dev/null +++ b/.sample_configs/param_handlers/predict_text_classification_single_label_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(content: str) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + instance_dict = {'content': content} + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_text_entity_extraction_sample.py b/.sample_configs/param_handlers/predict_text_entity_extraction_sample.py new file mode 100644 index 0000000000..d8185f0edc --- /dev/null +++ b/.sample_configs/param_handlers/predict_text_entity_extraction_sample.py @@ -0,0 +1,33 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(content: str) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + # The format of each instance should conform to the deployed model's prediction input schema + instance_dict = {'content': content} + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters + diff --git a/.sample_configs/param_handlers/predict_text_sentiment_analysis_sample.py b/.sample_configs/param_handlers/predict_text_sentiment_analysis_sample.py new file mode 100644 index 0000000000..a741e7723f --- /dev/null +++ b/.sample_configs/param_handlers/predict_text_sentiment_analysis_sample.py @@ -0,0 +1,31 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + return endpoint + +def make_instances(content: str) -> typing.Sequence[google.protobuf.struct_pb2.Value]: + instance_dict = {'content': content} + instance = to_protobuf_value(instance_dict) + instances = [instance] + + return instances + +def make_parameters() -> google.protobuf.struct_pb2.Value: + parameters_dict = {} + parameters = to_protobuf_value(parameters_dict) + + return parameters diff --git a/.sample_configs/param_handlers/search_migratable_resources_sample.py b/.sample_configs/param_handlers/search_migratable_resources_sample.py new file mode 100644 index 0000000000..780ca436ab --- /dev/null +++ b/.sample_configs/param_handlers/search_migratable_resources_sample.py @@ -0,0 +1,21 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in search_migratable_resources_sample + parent = parent + + return parent + diff --git a/.sample_configs/param_handlers/undeploy_model_sample.py b/.sample_configs/param_handlers/undeploy_model_sample.py new file mode 100644 index 0000000000..30de16d81b --- /dev/null +++ b/.sample_configs/param_handlers/undeploy_model_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_endpoint(endpoint: str) -> str: + endpoint = endpoint + + return endpoint + +def make_deployed_model_id(deployed_model_id: str) -> str: + # Sample function parameter deployed_model_id in undeploy_model_sample + deployed_model_id = deployed_model_id + + return deployed_model_id + +def make_traffic_split() -> dict: + # If after the undeployment there is at least one deployed model remaining in the endpoint, traffic_split should be set to a mapping from remaining deployed models' ids to integer percentages that sum to 100. + traffic_split = {} + + return traffic_split + diff --git a/.sample_configs/param_handlers/update_annotation_spec_sample.py b/.sample_configs/param_handlers/update_annotation_spec_sample.py new file mode 100644 index 0000000000..e53011855d --- /dev/null +++ b/.sample_configs/param_handlers/update_annotation_spec_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_annotation_spec(annotation_spec_name: str, new_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.annotation_spec.AnnotationSpec: + annotation_spec = { + 'name': annotation_spec_name, + 'display_name': new_display_name + } + + return annotation_spec + +def make_update_mask() -> google.protobuf.field_mask_pb2.FieldMask: + update_mask = { + 'paths': [ + 'display_name' + ] + } + + return update_mask + diff --git a/.sample_configs/param_handlers/update_dataset_sample.py b/.sample_configs/param_handlers/update_dataset_sample.py new file mode 100644 index 0000000000..4e6c1db26e --- /dev/null +++ b/.sample_configs/param_handlers/update_dataset_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_dataset(dataset_name: str, new_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.dataset.Dataset: + dataset = { + 'name': dataset_name, + 'display_name': new_display_name, + } + + return dataset + +def make_update_mask() -> google.protobuf.field_mask_pb2.FieldMask: + update_mask = { + 'paths': [ + 'display_name' + ] + } + + return update_mask + diff --git a/.sample_configs/param_handlers/update_endpoint_sample.py b/.sample_configs/param_handlers/update_endpoint_sample.py new file mode 100644 index 0000000000..15b497a6bc --- /dev/null +++ b/.sample_configs/param_handlers/update_endpoint_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_endpoint(endpoint_name: str, new_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.endpoint.Endpoint: + endpoint = { + 'name': endpoint_name, + 'display_name': new_display_name + } + + return endpoint + +def make_update_mask() -> google.protobuf.field_mask_pb2.FieldMask: + update_mask = { + 'paths': [ + 'display_name' + ] + } + + return update_mask + diff --git a/.sample_configs/param_handlers/update_model_sample.py b/.sample_configs/param_handlers/update_model_sample.py new file mode 100644 index 0000000000..fff76e2ac5 --- /dev/null +++ b/.sample_configs/param_handlers/update_model_sample.py @@ -0,0 +1,34 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_model(model_name: str, new_display_name: str) -> google.cloud.aiplatform_v1beta1.types.model.Model: + model = model + model = { + 'name': model_name, + 'display_name': new_display_name + } + + + return model + +def make_update_mask() -> google.protobuf.field_mask_pb2.FieldMask: + update_mask = { + 'paths': [ + 'display_name' + ] + } + + return update_mask + diff --git a/.sample_configs/param_handlers/update_specialist_pool_sample.py b/.sample_configs/param_handlers/update_specialist_pool_sample.py new file mode 100644 index 0000000000..be403a0e22 --- /dev/null +++ b/.sample_configs/param_handlers/update_specialist_pool_sample.py @@ -0,0 +1,32 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_specialist_pool(specialist_pool_name: str, new_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.specialist_pool.SpecialistPool: + specialist_pool = { + 'name': specialist_pool_name, + 'display_name': new_display_name, + } + + return specialist_pool + +def make_update_mask() -> google.protobuf.field_mask_pb2.FieldMask: + update_mask = { + 'paths': [ + 'display_name' + ] + } + + return update_mask + diff --git a/.sample_configs/param_handlers/upload_model_custom_container_sample.py b/.sample_configs/param_handlers/upload_model_custom_container_sample.py new file mode 100644 index 0000000000..f4685600cb --- /dev/null +++ b/.sample_configs/param_handlers/upload_model_custom_container_sample.py @@ -0,0 +1,39 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in upload_model_using_custom_container_sample + parent = parent + + return parent + + +def make_model( + display_name: str, container_spec_image_uri: str, artifact_uri: str +) -> google.cloud.aiplatform_v1beta1.types.model.Model: + + container_spec = {"image_uri": container_spec_image_uri, "command": [], "args": []} + + model = { + "display_name": display_name, + # The artifact_uri should be the path to a GCS directory containing + # saved model artifacts. The bucket must be accessible for the + # project's AI Platform service account and in the same region as + # the api endpoint. 
+ "artifact_uri": artifact_uri, + "container_spec": container_spec, + } + + return model diff --git a/.sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py b/.sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py new file mode 100644 index 0000000000..6710c55080 --- /dev/null +++ b/.sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py @@ -0,0 +1,69 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_model( + display_name: str, + container_spec_image_uri: str, + artifact_uri: str, + input_tensor_name: str, + output_tensor_name: str +) -> google.cloud.aiplatform_v1beta1.types.model.Model: + + # Container specification for deploying the model + container_spec = {"image_uri": container_spec_image_uri, "command": [], "args": []} + + # The explainabilty method and corresponding parameters + parameters = aiplatform.gapic.ExplanationParameters({"xrai_attribution": { "step_count": 1}}) + + # The input tensor for feature attribution to the output + # For single input model, y = f(x), this will be the serving input layer. 
+ input_metadata = aiplatform.gapic.ExplanationMetadata.InputMetadata({ + "input_tensor_name": input_tensor_name, + # Input is image data + "modality": "image", + }) + + # The output tensor to explain + # For single output model, y = f(x), this will be the serving output layer. + output_metadata = aiplatform.gapic.ExplanationMetadata.OutputMetadata({ + "output_tensor_name": output_tensor_name + }) + + # Assemble the explanation metadata + metadata = aiplatform.gapic.ExplanationMetadata( + inputs={'image': input_metadata}, + outputs={'prediction' : output_metadata} + ) + + # Assemble the explanation specification + explanation_spec = aiplatform.gapic.ExplanationSpec( + parameters=parameters, + metadata=metadata + ) + + model = aiplatform.gapic.Model(display_name=display_name, + # The Cloud Storage location of the custom model + artifact_uri=artifact_uri, + explanation_spec=explanation_spec, + container_spec=container_spec + ) + + return model + diff --git a/.sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py b/.sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py new file mode 100644 index 0000000000..5c23bccd75 --- /dev/null +++ b/.sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py @@ -0,0 +1,73 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + parent = parent + + return parent + +def make_model( + display_name: str, + container_spec_image_uri: str, + artifact_uri: str, + input_tensor_name: str, + output_tensor_name: str, + feature_names: list +) -> google.cloud.aiplatform_v1beta1.types.model.Model: + + # Container specification for deploying the model + container_spec = {"image_uri": container_spec_image_uri, "command": [], "args": []} + + # The explainabilty method and corresponding parameters + parameters = aiplatform.gapic.ExplanationParameters({"xrai_attribution": { "step_count": 1}}) + + # The input tensor for feature attribution to the output + # For single input model, y = f(x), this will be the serving input layer. + input_metadata = aiplatform.gapic.ExplanationMetadata.InputMetadata({ + "input_tensor_name": input_tensor_name, + # Input is tabular data + "modality": "numeric", + # Assign feature names to the inputs for explanation + "encoding": "BAG_OF_FEATURES", + "index_feature_mapping": feature_names + }) + + # The output tensor to explain + # For single output model, y = f(x), this will be the serving output layer. 
+ output_metadata = aiplatform.gapic.ExplanationMetadata.OutputMetadata({ + "output_tensor_name": output_tensor_name + }) + + # Assemble the explanation metadata + metadata = aiplatform.gapic.ExplanationMetadata( + inputs={'features': input_metadata}, + outputs={'prediction' : output_metadata} + ) + + # Assemble the explanation specification + explanation_spec = aiplatform.gapic.ExplanationSpec( + parameters=parameters, + metadata=metadata + ) + + model = aiplatform.gapic.Model(display_name=display_name, + # The Cloud Storage location of the custom model + artifact_uri=artifact_uri, + explanation_spec=explanation_spec, + container_spec=container_spec + ) + + return model + diff --git a/.sample_configs/param_handlers/upload_model_managed_container_sample.py b/.sample_configs/param_handlers/upload_model_managed_container_sample.py new file mode 100644 index 0000000000..d9b779bfa5 --- /dev/null +++ b/.sample_configs/param_handlers/upload_model_managed_container_sample.py @@ -0,0 +1,39 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in upload_model_using_managed_container_sample + parent = parent + + return parent + + +def make_model( + display_name: str, container_spec_image_uri: str, artifact_uri: str +) -> google.cloud.aiplatform_v1beta1.types.model.Model: + + container_spec = {"image_uri": container_spec_image_uri} + + model = { + "display_name": display_name, + # The artifact_uri should be the path to a GCS directory containing + # saved model artifacts. The bucket must be accessible for the + # project's AI Platform service account and in the same region as + # the api endpoint. + "artifact_uri": artifact_uri, + "container_spec": container_spec, + } + + return model diff --git a/.sample_configs/param_handlers/upload_model_sample.py b/.sample_configs/param_handlers/upload_model_sample.py new file mode 100644 index 0000000000..7adcbba78a --- /dev/null +++ b/.sample_configs/param_handlers/upload_model_sample.py @@ -0,0 +1,43 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +def make_parent(parent: str) -> str: + # Sample function parameter parent in upload_model_sample + parent = parent + + return parent + +def make_model(display_name: str, metadata_schema_uri: str, image_uri: str, artifact_uri: str) -> google.cloud.aiplatform_v1alpha1.types.model.Model: + model = { + 'display_name': display_name, + 'metadata_schema_uri': metadata_schema_uri, + # The artifact_uri should be the path to a GCS directory containing + # saved model artifacts. The bucket must be accessible for the + # project's AI Platform service account and in the same region as + # the api endpoint. + 'artifact_uri': artifact_uri, + 'container_spec': { + 'image_uri': image_uri, + 'command': [], + 'args': [], + 'env': [], + 'ports': [], + 'predict_route': '', + 'health_route': '' + } + } + + return model + diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml new file mode 100644 index 0000000000..7eb80f400f --- /dev/null +++ b/.sample_configs/process_configs.yaml @@ -0,0 +1,333 @@ +global: + api_endpoint: us-central1-aiplatform.googleapis.com + timeout: 300 + skip: + - etag + - create_time + - update_time + max_depth: -1 + license_year: 2020 +batch_migrate_resources_sample: {} +cancel_batch_prediction_job_sample: {} +cancel_custom_job_sample: {} +cancel_data_labeling_job_sample: {} +cancel_hyperparameter_tuning_job_sample: {} +cancel_training_pipeline_sample: {} +create_batch_prediction_job_bigquery_sample: {} +create_batch_prediction_job_custom_image_explain_sample: {} +create_batch_prediction_job_custom_tabular_explain_sample: {} +create_batch_prediction_job_sample: {} +create_batch_prediction_job_tabular_explain_sample: {} +create_batch_prediction_job_text_classification_sample: {} +create_batch_prediction_job_text_entity_extraction_sample: {} +create_batch_prediction_job_text_sentiment_analysis_sample: {} +create_batch_prediction_job_video_action_recognition_sample: {} +create_batch_prediction_job_video_classification_sample: 
{} +create_batch_prediction_job_video_object_tracking_sample: {} +create_custom_job_python_package_sample: {} +create_custom_job_sample: {} +create_data_labeling_job_active_learning_sample: {} +create_data_labeling_job_image_segmentation_sample: {} +create_data_labeling_job_images_sample: {} +create_data_labeling_job_sample: {} +create_data_labeling_job_specialist_pool_sample: {} +create_data_labeling_job_text_sample: {} +create_data_labeling_job_video_sample: {} +create_dataset_image_sample: + skip: + - create_time + - update_time +create_dataset_sample: + skip: + - create_time + - update_time +create_dataset_tabular_bigquery_sample: + skip: + - labels + - create_time + - update_time +create_dataset_tabular_gcs_sample: + skip: + - labels + - create_time + - update_time +create_dataset_text_sample: + skip: + - create_time + - update_time +create_dataset_video_sample: + skip: + - create_time + - update_time +create_endpoint_sample: + skip: + - traffic_split + - deployed_models +create_hyperparameter_tuning_job_python_package_sample: {} +create_hyperparameter_tuning_job_sample: {} +create_specialist_pool_sample: {} +create_training_pipeline_custom_job_sample: {} +create_training_pipeline_custom_training_managed_dataset_sample: {} +create_training_pipeline_entity_extraction_sample: {} +create_training_pipeline_image_classification_sample: {} +create_training_pipeline_image_object_detection_sample: {} +create_training_pipeline_sample: {} +create_training_pipeline_tabular_classification_sample: {} +create_training_pipeline_tabular_regression_sample: {} +create_training_pipeline_text_classification_sample: {} +create_training_pipeline_text_entity_extraction_sample: + skip: + - predict_schemata + - supported_export_formats + - container_spec + - deployed_models + - explanation_spec +create_training_pipeline_text_sentiment_analysis_sample: + skip: + - predict_schemata + - supported_export_formats + - container_spec + - deployed_models + - explanation_spec 
+create_training_pipeline_video_action_recognition_sample: + skip: + - predict_schemata + - supported_export_formats + - container_spec + - deployed_models + - explanation_spec +create_training_pipeline_video_classification_sample: + skip: + - predict_schemata + - supported_export_formats + - container_spec + - deployed_models + - explanation_spec +create_training_pipeline_video_object_tracking_sample: + skip: + - predict_schemata + - supported_export_formats + - container_spec + - deployed_models + - explanation_spec +delete_batch_prediction_job_sample: {} +delete_custom_job_sample: {} +delete_data_labeling_job_sample: {} +delete_dataset_sample: {} +delete_endpoint_sample: {} +delete_hyperparameter_tuning_job_sample: {} +delete_model_sample: {} +delete_specialist_pool_sample: {} +delete_training_pipeline_sample: {} +deploy_model_custom_trained_model_sample: + max_depth: 0 + resource_name: endpoint + timeout: 7200 + skip: + - explanation_spec +deploy_model_sample: + max_depth: 0 + resource_name: endpoint + timeout: 7200 + skip: + - explanation_spec +explain_custom_image_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint +explain_custom_tabular_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + approximation_error: This is the approximation error. + attributions: Feature attributions. +explain_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + approximation_error: This is the approximation error. + attributions: Feature attributions. +explain_tabular_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 2 + resource_name: endpoint + comments: + approximation_error: This is the approximation error. + attributions: Feature attributions. 
+export_data_sample: + resource_name: dataset +export_evaluated_data_items_sample: + resource_name: model_evaluation +export_model_sample: {} +export_model_tabular_classification_sample: {} +export_model_video_action_recognition_sample: {} +export_model_video_classification_sample: {} +export_model_video_object_tracking_sample: {} +get_annotation_spec_sample: {} +get_batch_prediction_job_sample: + skip: + - dedicated_resources + - manual_batch_tuning_parameters +get_custom_job_sample: {} +get_data_labeling_job_sample: {} +get_dataset_sample: {} +get_endpoint_sample: {} +get_hyperparameter_tuning_job_sample: {} +get_model_evaluation_image_classification_sample: {} +get_model_evaluation_image_object_detection_sample: {} +get_model_evaluation_sample: + skip: + - model_explanation +get_model_evaluation_slice_sample: {} +get_model_evaluation_tabular_classification_sample: {} +get_model_evaluation_tabular_regression_sample: {} +get_model_evaluation_text_classification_sample: + skip: + - create_time + - slice_dimensions +get_model_evaluation_text_entity_extraction_sample: + skip: + - create_time +get_model_evaluation_text_sentiment_analysis_sample: + skip: + - create_time +get_model_evaluation_video_action_recognition_sample: + skip: + - model_explanation +get_model_evaluation_video_classification_sample: + skip: + - model_explanation +get_model_evaluation_video_object_tracking_sample: + skip: + - model_explanation +get_model_sample: + skip: + - explanation_spec +get_specialist_pool_sample: {} +get_training_pipeline_sample: {} +import_data_image_classification_sample: + resource_name: dataset +import_data_image_classification_single_label_sample: + timeout: 1800 + resource_name: dataset +import_data_image_object_detection_sample: + timeout: 1800 + resource_name: dataset +import_data_sample: + timeout: 1800 + resource_name: dataset +import_data_text_classification_single_label_sample: + timeout: 1800 + resource_name: dataset +import_data_text_entity_extraction_sample: + 
resource_name: dataset + timeout: 1800 +import_data_text_sentiment_analysis_sample: + resource_name: dataset + timeout: 1800 +import_data_video_action_recognition_sample: + resource_name: dataset + timeout: 1800 +import_data_video_classification_sample: + timeout: 1800 + resource_name: dataset +import_data_video_object_tracking_sample: + timeout: 1800 + resource_name: dataset +list_annotations_sample: {} +list_batch_prediction_jobs_sample: {} +list_custom_jobs_sample: {} +list_data_items_sample: {} +list_data_labeling_jobs_sample: {} +list_datasets_sample: {} +list_endpoints_sample: {} +list_hyperparameter_tuning_jobs_sample: {} +list_model_evaluation_slices_sample: {} +list_model_evaluations_sample: {} +list_models_sample: {} +list_specialist_pools_sample: {} +list_training_pipelines_sample: {} +predict_custom_trained_model_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: The predictions are a google.protobuf.Value representation of the + model's predictions. +predict_image_classification_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml + for the format of the predictions. +predict_image_file_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint +predict_image_object_detection_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection.yaml + for the format of the predictions. 
+predict_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + region_tags: + - aiplatform_predict_sample + - aiplatform_predict_tutorial +predict_tabular_classification_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tables_classification.yaml + for the format of the predictions. +predict_tabular_regression_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tables_regression.yaml + for the format of the predictions. +predict_text_classification_single_label_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_classification.yaml + for the format of the predictions. +predict_text_entity_extraction_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_extraction.yaml + for the format of the predictions. +predict_text_sentiment_analysis_sample: + api_endpoint: us-central1-prediction-aiplatform.googleapis.com + max_depth: 1 + resource_name: endpoint + comments: + predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_sentiment.yaml + for the format of the predictions. 
+search_migratable_resources_sample: {} +undeploy_model_sample: + resource_name: endpoint +update_dataset_sample: {} +update_endpoint_sample: {} +update_model_sample: {} +update_specialist_pool_sample: {} +upload_model_custom_container_sample: + timeout: 1800 +upload_model_explain_image_managed_container_sample: {} +upload_model_explain_tabular_managed_container_sample: {} +upload_model_managed_container_sample: + timeout: 1800 +upload_model_sample: + timeout: 1800 diff --git a/.sample_configs/resource_info.yaml b/.sample_configs/resource_info.yaml new file mode 100644 index 0000000000..4c8eae85eb --- /dev/null +++ b/.sample_configs/resource_info.yaml @@ -0,0 +1,33 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +mapping: + evaluation: model_evaluation + slice: model_evaluation_slice +resources: +- projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec} +- projects/{project}/locations/{location}/datasets/{dataset} +- projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice} +- projects/{project}/locations/{location}/trainingPipelines/{training_pipeline} +- projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation} +- projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation} +- projects/{project}/locations/{location}/endpoints/{endpoint} +- projects/{project}/locations/{location}/specialistPools/{specialist_pool} +- projects/{project}/locations/{location}/customJobs/{custom_job} +- projects/{project}/locations/{location}/models/{model} +- projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job} +- projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job} +- projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job} +- projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item} diff --git a/.sample_configs/variants.yaml b/.sample_configs/variants.yaml new file mode 100644 index 0000000000..59a0fd78f5 --- /dev/null +++ b/.sample_configs/variants.yaml @@ -0,0 +1,215 @@ +batch_delete_data_items: +- '' +batch_migrate_resources: +- '' +cancel_batch_prediction_job: +- '' +cancel_custom_job: +- '' +cancel_data_labeling_job: +- '' +cancel_hyperparameter_tuning_job: +- '' +cancel_training_pipeline: +- '' +create_annotation: +- '' +create_annotation_spec: +- '' +create_batch_prediction_job: +- '' +- bigquery +- custom_image_explain +- custom_tabular_explain +- tabular_explain +- text_classification +- text_entity_extraction +- text_sentiment_analysis +- video_action_recognition +- video_classification +- video_object_tracking 
+create_custom_job: +- '' +- python_package +create_data_labeling_job: +- '' +- active_learning +- image_segmentation +- images +- specialist_pool +- video +create_dataset: +- '' +- image +- tabular_bigquery +- tabular_gcs +- text +- video +create_endpoint: +- '' +create_hyperparameter_tuning_job: +- '' +- python_package +create_specialist_pool: +- '' +create_training_pipeline: +- '' +- custom_job +- custom_training_managed_dataset +- image_classification +- image_object_detection +- tabular_classification +- tabular_regression +- text_classification +- text_entity_extraction +- text_sentiment_analysis +- video_action_recognition +- video_classification +- video_object_tracking +delete_annotation: +- '' +delete_annotation_spec: +- '' +delete_batch_prediction_job: +- '' +delete_custom_job: +- '' +delete_data_item: +- '' +delete_data_labeling_job: +- '' +delete_dataset: +- '' +delete_endpoint: +- '' +delete_hyperparameter_tuning_job: +- '' +delete_model: +- '' +delete_specialist_pool: +- '' +delete_training_pipeline: +- '' +deploy_model: +- '' +- custom_trained_model +explain: +- '' +- custom_image +- custom_tabular +- tabular +export_data: +- '' +export_evaluated_data_items: +- '' +export_model: +- '' +- tabular_classification +- video_action_recognition +get_annotation: +- '' +get_annotation_spec: +- '' +get_batch_prediction_job: +- '' +get_custom_job: +- '' +get_data_item: +- '' +get_data_labeling_job: +- '' +get_dataset: +- '' +get_endpoint: +- '' +get_hyperparameter_tuning_job: +- '' +get_model: +- '' +get_model_evaluation: +- '' +- image_classification +- image_object_detection +- tabular_classification +- tabular_regression +- text_classification +- text_entity_extraction +- text_sentiment_analysis +- video_action_recognition +- video_classification +- video_object_tracking +get_model_evaluation_slice: +- '' +get_specialist_pool: +- '' +get_training_pipeline: +- '' +import_data: +- '' +- image_classification_single_label +- image_object_detection +- 
text_classification_single_label +- text_entity_extraction +- text_sentiment_analysis +- video_action_recognition +- video_classification +- video_object_tracking +list_annotation_specs: +- '' +list_annotations: +- '' +list_batch_prediction_jobs: +- '' +list_custom_jobs: +- '' +list_data_items: +- '' +list_data_labeling_jobs: +- '' +list_datasets: +- '' +list_endpoints: +- '' +list_hyperparameter_tuning_jobs: +- '' +list_model_evaluation_slices: +- '' +list_model_evaluations: +- '' +list_models: +- '' +list_specialist_pools: +- '' +list_training_pipelines: +- '' +predict: +- '' +- custom_trained_model +- image_classification +- image_file +- image_object_detection +- tabular_classification +- tabular_regression +- text_classification_single_label +- text_entity_extraction +- text_sentiment_analysis +search_migratable_resources: +- '' +undeploy_model: +- '' +update_annotation_spec: +- '' +update_dataset: +- '' +update_endpoint: +- '' +update_model: +- '' +update_specialist_pool: +- '' +upload_model: +- '' +- custom_container +- explain_image_managed_container +- explain_tabular_managed_container +- managed_container From 6096ce73e455f2a1181ffa9dd9c5551ff17e787e Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 8 Dec 2020 14:49:00 -0800 Subject: [PATCH 19/34] chore: sample tests decouple (#124) * chore: decouple samples from sample tests, and refactor sample test fixtures --- .../cancel_training_pipeline_sample_test.py | 56 ++--- samples/snippets/conftest.py | 237 ++++++++++++++++++ ...tch_prediction_job_bigquery_sample_test.py | 27 +- ...create_batch_prediction_job_sample_test.py | 27 +- ...ion_job_text_classification_sample_test.py | 36 +-- ..._job_text_entity_extraction_sample_test.py | 36 +-- ...job_text_sentiment_analysis_sample_test.py | 36 +-- ...b_video_action_recognition_sample_test.py} | 34 +-- ...on_job_video_classification_sample_test.py | 36 +-- ...n_job_video_object_tracking_sample_test.py | 35 +-- .../snippets/create_custom_job_sample_test.py | 24 
+- ...beling_job_active_learning_sample_test.py} | 38 +-- ...ing_job_image_segmentation_sample_test.py} | 38 +-- ...te_data_labeling_job_images_sample_test.py | 35 +-- .../create_data_labeling_job_sample_test.py | 35 +-- ...beling_job_specialist_pool_sample_test.py} | 38 +-- ...ate_data_labeling_job_video_sample_test.py | 32 +-- .../create_dataset_image_sample_test.py | 18 +- .../snippets/create_dataset_sample_test.py | 18 +- ...te_dataset_tabular_bigquery_sample_test.py | 18 +- .../create_dataset_tabular_gcs_sample_test.py | 18 +- .../create_dataset_video_sample_test.py | 18 +- .../snippets/create_endpoint_sample_test.py | 15 +- ...r_tuning_job_python_package_sample_test.py | 36 +-- ...e_hyperparameter_tuning_job_sample_test.py | 35 +-- ...raining_pipeline_custom_job_sample_test.py | 42 +--- ...om_training_managed_dataset_sample_test.py | 59 +---- ...peline_image_classification_sample_test.py | 33 +-- ...line_image_object_detection_sample_test.py | 33 +-- .../create_training_pipeline_sample_test.py | 33 +-- ...line_tabular_classification_sample_test.py | 34 +-- ...pipeline_tabular_regression_sample_test.py | 33 +-- ...line_text_entity_extraction_sample_test.py | 33 +-- ...ine_text_sentiment_analysis_sample_test.py | 33 +-- ...e_video_action_recognition_sample_test.py} | 39 +-- ...peline_video_classification_sample_test.py | 33 +-- ...eline_video_object_tracking_sample_test.py | 33 +-- ..._model_custom_trained_model_sample_test.py | 49 +--- samples/snippets/deploy_model_sample_test.py | 47 +--- ...odel_tabular_classification_sample_test.py | 4 +- ...l_video_action_recognition_sample_test.py} | 4 +- .../get_model_evaluation_sample_test.py | 1 - .../get_model_evaluation_slice_sample_test.py | 1 - ...tion_tabular_classification_sample_test.py | 1 - ...aluation_tabular_regression_sample_test.py | 1 - ...n_video_action_recognition_sample_test.py} | 1 - ...uation_video_classification_sample_test.py | 1 - ...ation_video_object_tracking_sample_test.py | 1 - 
samples/snippets/get_model_sample_test.py | 1 - .../get_training_pipeline_sample_test.py | 1 - ...classification_single_label_sample_test.py | 40 --- ...data_text_entity_extraction_sample_test.py | 40 +-- ...ata_text_sentiment_analysis_sample_test.py | 40 +-- ...ta_video_action_recognition_sample_test.py | 51 ++++ ...port_data_video_action_recognition_test.py | 85 ------- ...t_data_video_classification_sample_test.py | 34 +-- ..._data_video_object_tracking_sample_test.py | 34 +-- samples/snippets/upload_model_sample_test.py | 13 +- 58 files changed, 427 insertions(+), 1437 deletions(-) create mode 100644 samples/snippets/conftest.py rename samples/snippets/{create_batch_prediction_job_video_action_recognition_test.py => create_batch_prediction_job_video_action_recognition_sample_test.py} (67%) rename samples/snippets/{create_data_labeling_job_active_learning_test.py => create_data_labeling_job_active_learning_sample_test.py} (63%) rename samples/snippets/{create_data_labeling_job_image_segmentation_test.py => create_data_labeling_job_image_segmentation_sample_test.py} (65%) rename samples/snippets/{create_data_labeling_job_specialist_pool_test.py => create_data_labeling_job_specialist_pool_sample_test.py} (65%) rename samples/snippets/{create_training_pipeline_video_action_recognition_test.py => create_training_pipeline_video_action_recognition_sample_test.py} (64%) rename samples/snippets/{export_model_video_action_recognition_test.py => export_model_video_action_recognition_sample_test.py} (94%) rename samples/snippets/{get_model_evaluation_video_action_recognition_test.py => get_model_evaluation_video_action_recognition_sample_test.py} (99%) delete mode 100644 samples/snippets/import_data_text_classification_single_label_sample_test.py create mode 100644 samples/snippets/import_data_video_action_recognition_sample_test.py delete mode 100644 samples/snippets/import_data_video_action_recognition_test.py diff --git 
a/samples/snippets/cancel_training_pipeline_sample_test.py b/samples/snippets/cancel_training_pipeline_sample_test.py index e16b384c22..992cf8269d 100644 --- a/samples/snippets/cancel_training_pipeline_sample_test.py +++ b/samples/snippets/cancel_training_pipeline_sample_test.py @@ -15,12 +15,11 @@ import os from uuid import uuid4 -from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value import pytest import cancel_training_pipeline_sample -import create_training_pipeline_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -30,47 +29,46 @@ TRAINING_DEFINITION_GCS_PATH = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_classification_1.0.0.yaml" -@pytest.fixture(scope="function") -def training_pipeline_id(capsys): - create_training_pipeline_sample.create_training_pipeline_sample( - project=PROJECT_ID, - display_name=DISPLAY_NAME, - training_task_definition=TRAINING_DEFINITION_GCS_PATH, - dataset_id=DATASET_ID, - model_display_name=f"Temp Model for {DISPLAY_NAME}", +@pytest.fixture(autouse=True) +def setup(shared_state, pipeline_client): + training_task_inputs = json_format.ParseDict({}, Value()) + training_pipeline = pipeline_client.create_training_pipeline( + parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", + training_pipeline={ + "display_name": DISPLAY_NAME, + "training_task_definition": TRAINING_DEFINITION_GCS_PATH, + "training_task_inputs": training_task_inputs, + "input_data_config": {"dataset_id": DATASET_ID}, + "model_to_upload": {"display_name": f"Temp Model for {DISPLAY_NAME}"}, + }, ) - out, _ = capsys.readouterr() + shared_state["training_pipeline_name"] = training_pipeline.name - training_pipeline_name = helpers.get_name(out) + yield - assert "/" in training_pipeline_name - training_pipeline_id = training_pipeline_name.split("/")[-1] +@pytest.fixture(autouse=True) +def teardown(shared_state, 
pipeline_client): + yield - yield training_pipeline_id - - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id + pipeline_client.delete_training_pipeline( + name=shared_state["training_pipeline_name"] ) -def test_ucaip_generated_cancel_training_pipeline_sample(capsys, training_pipeline_id): +def test_ucaip_generated_cancel_training_pipeline_sample( + capsys, shared_state, pipeline_client +): # Run cancel pipeline sample + training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] + cancel_training_pipeline_sample.cancel_training_pipeline_sample( project=PROJECT_ID, training_pipeline_id=training_pipeline_id ) - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - # Waiting for training pipeline to be in CANCELLED state, otherwise raise error helpers.wait_for_job_state( get_job_method=pipeline_client.get_training_pipeline, - name=pipeline_client.training_pipeline_path( - project=PROJECT_ID, - location=LOCATION, - training_pipeline=training_pipeline_id, - ), + name=shared_state["training_pipeline_name"], ) diff --git a/samples/snippets/conftest.py b/samples/snippets/conftest.py new file mode 100644 index 0000000000..2d2dd2fa78 --- /dev/null +++ b/samples/snippets/conftest.py @@ -0,0 +1,237 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from uuid import uuid4 + +from google.cloud import aiplatform +from google.cloud import storage +import pytest + +import helpers + + +@pytest.fixture() +def shared_state(): + state = {} + yield state + + +@pytest.fixture +def storage_client(): + storage_client = storage.Client() + return storage_client + + +@pytest.fixture() +def job_client(): + job_client = aiplatform.gapic.JobServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + return job_client + + +@pytest.fixture() +def data_labeling_job_client(): + data_labeling_api_endpoint = os.getenv("DATA_LABELING_API_ENDPOINT") + data_labeling_job_client = aiplatform.gapic.JobServiceClient( + client_options={"api_endpoint": data_labeling_api_endpoint} + ) + return data_labeling_job_client + + +@pytest.fixture +def pipeline_client(): + pipeline_client = aiplatform.gapic.PipelineServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + return pipeline_client + + +@pytest.fixture +def model_client(): + model_client = aiplatform.gapic.ModelServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + yield model_client + + +@pytest.fixture +def endpoint_client(): + endpoint_client = aiplatform.gapic.EndpointServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + yield endpoint_client + + +@pytest.fixture +def dataset_client(): + dataset_client = aiplatform.gapic.DatasetServiceClient( + client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} + ) + yield dataset_client + + +# Shared setup/teardown. +@pytest.fixture() +def teardown_batch_prediction_job(shared_state, job_client): + yield + + job_client.cancel_batch_prediction_job( + name=shared_state["batch_prediction_job_name"] + ) + + # Waiting until the job is in CANCELLED state. 
+ helpers.wait_for_job_state( + get_job_method=job_client.get_batch_prediction_job, + name=shared_state["batch_prediction_job_name"], + ) + + job_client.delete_batch_prediction_job( + name=shared_state["batch_prediction_job_name"] + ) + + +@pytest.fixture() +def teardown_data_labeling_job(capsys, shared_state, data_labeling_job_client): + yield + + assert "/" in shared_state["data_labeling_job_name"] + + data_labeling_job_client.cancel_data_labeling_job( + name=shared_state["data_labeling_job_name"] + ) + + # Verify Data Labelling Job is cancelled, or timeout after 400 seconds + helpers.wait_for_job_state( + get_job_method=data_labeling_job_client.get_data_labeling_job, + name=shared_state["data_labeling_job_name"], + timeout=400, + freq=10, + ) + + # Delete the data labeling job + response = data_labeling_job_client.delete_data_labeling_job( + name=shared_state["data_labeling_job_name"] + ) + print("Delete LRO:", response.operation.name) + delete_data_labeling_job_response = response.result(timeout=300) + print("delete_data_labeling_job_response", delete_data_labeling_job_response) + + out, _ = capsys.readouterr() + assert "delete_data_labeling_job_response" in out + + +@pytest.fixture() +def teardown_hyperparameter_tuning_job(shared_state, job_client): + yield + + # Cancel the created hyperparameter tuning job + job_client.cancel_hyperparameter_tuning_job( + name=shared_state["hyperparameter_tuning_job_name"] + ) + + # Waiting for hyperparameter tuning job to be in CANCELLED state + helpers.wait_for_job_state( + get_job_method=job_client.get_hyperparameter_tuning_job, + name=shared_state["hyperparameter_tuning_job_name"], + ) + + # Delete the created hyperparameter tuning job + job_client.delete_hyperparameter_tuning_job( + name=shared_state["hyperparameter_tuning_job_name"] + ) + + +@pytest.fixture() +def teardown_training_pipeline(shared_state, pipeline_client): + yield + + pipeline_client.cancel_training_pipeline( + name=shared_state["training_pipeline_name"] 
+ ) + + # Waiting for training pipeline to be in CANCELLED state + helpers.wait_for_job_state( + get_job_method=pipeline_client.get_training_pipeline, + name=shared_state["training_pipeline_name"], + ) + + # Delete the training pipeline + pipeline_client.delete_training_pipeline( + name=shared_state["training_pipeline_name"] + ) + + +@pytest.fixture() +def create_dataset(shared_state, dataset_client): + def create( + project, location, metadata_schema_uri, test_name="temp_import_dataset_test" + ): + parent = f"projects/{project}/locations/{location}" + dataset = aiplatform.gapic.Dataset( + display_name=f"{test_name}_{uuid4()}", + metadata_schema_uri=metadata_schema_uri, + ) + + operation = dataset_client.create_dataset(parent=parent, dataset=dataset) + + dataset = operation.result(timeout=300) + shared_state["dataset_name"] = dataset.name + + yield create + + +@pytest.fixture() +def teardown_dataset(shared_state, dataset_client): + yield + + # Delete the created dataset + dataset_client.delete_dataset(name=shared_state["dataset_name"]) + + +@pytest.fixture() +def create_endpoint(shared_state, endpoint_client): + def create(project, location, test_name="temp_deploy_model_test"): + parent = f"projects/{project}/locations/{location}" + endpoint = aiplatform.gapic.Endpoint(display_name=f"{test_name}_{uuid4()}",) + create_endpoint_response = endpoint_client.create_endpoint( + parent=parent, endpoint=endpoint + ) + + endpoint = create_endpoint_response.result() + shared_state["endpoint_name"] = endpoint.name + + yield create + + +@pytest.fixture() +def teardown_endpoint(shared_state, endpoint_client): + yield + + undeploy_model_operation = endpoint_client.undeploy_model( + deployed_model_id=shared_state["deployed_model_id"], + endpoint=shared_state["endpoint_name"], + ) + undeploy_model_operation.result() + + # Delete the endpoint + endpoint_client.delete_endpoint(name=shared_state["endpoint_name"]) + + +@pytest.fixture() +def teardown_model(shared_state, model_client): 
+ yield + + model_client.delete_model(name=shared_state["model_name"]) diff --git a/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py b/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py index b5c9b1d98e..d82217aea7 100644 --- a/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_bigquery_sample_test.py @@ -15,7 +15,6 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest import create_batch_prediction_job_bigquery_sample @@ -31,34 +30,10 @@ PREDICTIONS_FORMAT = "bigquery" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def job_client(): - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - return job_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, job_client): +def teardown(teardown_batch_prediction_job): yield - job_client.cancel_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) - - # Waiting until the job is in CANCELLED state. 
- helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - job_client.delete_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) - def test_ucaip_generated_create_batch_prediction_job_bigquery_sample( capsys, shared_state diff --git a/samples/snippets/create_batch_prediction_job_sample_test.py b/samples/snippets/create_batch_prediction_job_sample_test.py index 62a03aefc7..6690a11d0b 100644 --- a/samples/snippets/create_batch_prediction_job_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_sample_test.py @@ -15,7 +15,6 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest import create_batch_prediction_job_sample @@ -33,34 +32,10 @@ PREDICTIONS_FORMAT = "jsonl" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def job_client(): - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - return job_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, job_client): +def teardown(teardown_batch_prediction_job): yield - job_client.cancel_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) - - # Waiting until the job is in CANCELLED state. 
- helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - job_client.delete_batch_prediction_job(name=shared_state["batch_prediction_job_name"]) - # Creating AutoML Vision Classification batch prediction job def test_ucaip_generated_create_batch_prediction_sample(capsys, shared_state): diff --git a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py index c1f033676d..7f6632e701 100644 --- a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_batch_prediction_job_sample import create_batch_prediction_job_text_classification_sample -import delete_batch_prediction_job_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -33,36 +30,9 @@ GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" -@pytest.fixture(scope="function") -def shared_state(): - - shared_state = {} - - yield shared_state - - assert "/" in shared_state["batch_prediction_job_name"] - - batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1] - - # Stop the batch prediction job - cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) - - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for batch prediction job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - # Delete the batch prediction job - 
delete_batch_prediction_job_sample.delete_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_batch_prediction_job): + yield # Creating AutoML Text Classification batch prediction job diff --git a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py index bf920c22cf..68769c7825 100644 --- a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_batch_prediction_job_sample import create_batch_prediction_job_text_entity_extraction_sample -import delete_batch_prediction_job_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -33,36 +30,9 @@ GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" -@pytest.fixture(scope="function") -def shared_state(): - - shared_state = {} - - yield shared_state - - assert "/" in shared_state["batch_prediction_job_name"] - - batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1] - - # Stop the batch prediction job - cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) - - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for batch prediction job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - # Delete the batch prediction job - delete_batch_prediction_job_sample.delete_batch_prediction_job_sample( - project=PROJECT_ID, 
batch_prediction_job_id=batch_prediction_job - ) +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_batch_prediction_job): + yield # Creating AutoML Text Entity Extraction batch prediction job diff --git a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py index 192312086f..ca6cf2b0d8 100644 --- a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_batch_prediction_job_sample import create_batch_prediction_job_text_sentiment_analysis_sample -import delete_batch_prediction_job_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -33,36 +30,9 @@ GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" -@pytest.fixture(scope="function") -def shared_state(): - - shared_state = {} - - yield shared_state - - assert "/" in shared_state["batch_prediction_job_name"] - - batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1] - - # Stop the batch prediction job - cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) - - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for batch prediction job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - # Delete the batch prediction job - delete_batch_prediction_job_sample.delete_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) +@pytest.fixture(scope="function", autouse=True) 
+def teardown(teardown_batch_prediction_job): + yield # Creating AutoML Text Sentiment Analysis batch prediction job diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample_test.py similarity index 67% rename from samples/snippets/create_batch_prediction_job_video_action_recognition_test.py rename to samples/snippets/create_batch_prediction_job_video_action_recognition_sample_test.py index 269410ac17..180a259f6f 100644 --- a/samples/snippets/create_batch_prediction_job_video_action_recognition_test.py +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample_test.py @@ -15,7 +15,6 @@ import os import uuid -from google.cloud import aiplatform import pytest import create_batch_prediction_job_video_action_recognition_sample @@ -29,43 +28,12 @@ ) GCS_SOURCE_URI = "gs://automl-video-demo-data/ucaip-var/swimrun_bp.jsonl" GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" -API_ENDPOINT = "us-central1-aiplatform.googleapis.com" - - -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def job_client(): - client_options = {"api_endpoint": API_ENDPOINT} - job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) - yield job_client @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, job_client): +def teardown(teardown_batch_prediction_job): yield - # Stop the batch prediction job - # Delete the batch prediction job - job_client.cancel_batch_prediction_job( - name=shared_state["batch_prediction_job_name"] - ) - - # Waiting for batch prediction job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - # Delete the batch prediction job - job_client.delete_batch_prediction_job( - name=shared_state["batch_prediction_job_name"] - ) - # Creating AutoML Video 
Object Tracking batch prediction job def test_create_batch_prediction_job_video_action_recognition_sample( diff --git a/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py b/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py index d62d684abe..c3bd6e24a1 100644 --- a/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_video_classification_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_batch_prediction_job_sample import create_batch_prediction_job_video_classification_sample -import delete_batch_prediction_job_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -33,36 +30,9 @@ GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" -@pytest.fixture(scope="function") -def shared_state(): - - shared_state = {} - - yield shared_state - - assert "/" in shared_state["batch_prediction_job_name"] - - batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1] - - # Stop the batch prediction job - cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) - - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for batch prediction job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - # Delete the batch prediction job - delete_batch_prediction_job_sample.delete_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_batch_prediction_job): + yield # Creating AutoML Video Classification batch prediction job diff 
--git a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py index 3946db4e69..12426af11b 100644 --- a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_batch_prediction_job_sample import create_batch_prediction_job_video_object_tracking_sample -import delete_batch_prediction_job_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -33,40 +30,10 @@ GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_batch_prediction_job): yield - assert "/" in shared_state["batch_prediction_job_name"] - - batch_prediction_job = shared_state["batch_prediction_job_name"].split("/")[-1] - - # Stop the batch prediction job - cancel_batch_prediction_job_sample.cancel_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) - - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for batch prediction job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_batch_prediction_job, - name=shared_state["batch_prediction_job_name"], - ) - - # Delete the batch prediction job - delete_batch_prediction_job_sample.delete_batch_prediction_job_sample( - project=PROJECT_ID, batch_prediction_job_id=batch_prediction_job - ) - # Creating AutoML Video Object Tracking batch prediction job def test_ucaip_generated_create_batch_prediction_vcn_sample(capsys, shared_state): diff --git 
a/samples/snippets/create_custom_job_sample_test.py b/samples/snippets/create_custom_job_sample_test.py index 2b699b2061..212dd41e3c 100644 --- a/samples/snippets/create_custom_job_sample_test.py +++ b/samples/snippets/create_custom_job_sample_test.py @@ -16,37 +16,21 @@ import os import uuid -from google.cloud import aiplatform import pytest -import cancel_custom_job_sample import create_custom_job_sample -import delete_custom_job_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") CONTAINER_IMAGE_URI = "gcr.io/ucaip-test/ucaip-training-test:latest" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(shared_state, job_client): yield - custom_job_id = shared_state["custom_job_name"].split("/")[-1] - # Cancel the created custom job - cancel_custom_job_sample.cancel_custom_job_sample( - project=PROJECT_ID, custom_job_id=custom_job_id - ) - - client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} - job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) + job_client.cancel_custom_job(name=shared_state["custom_job_name"]) # Waiting for custom job to be in CANCELLED state helpers.wait_for_job_state( @@ -54,9 +38,7 @@ def teardown(shared_state): ) # Delete the created custom job - delete_custom_job_sample.delete_custom_job_sample( - project=PROJECT_ID, custom_job_id=custom_job_id - ) + job_client.delete_custom_job(name=shared_state["custom_job_name"]) def test_ucaip_generated_create_custom_job(capsys, shared_state): diff --git a/samples/snippets/create_data_labeling_job_active_learning_test.py b/samples/snippets/create_data_labeling_job_active_learning_sample_test.py similarity index 63% rename from samples/snippets/create_data_labeling_job_active_learning_test.py rename to samples/snippets/create_data_labeling_job_active_learning_sample_test.py index fff2eff6f4..8ac753eb52 100644 --- 
a/samples/snippets/create_data_labeling_job_active_learning_test.py +++ b/samples/snippets/create_data_labeling_job_active_learning_sample_test.py @@ -15,7 +15,6 @@ import os import uuid -from google.cloud import aiplatform import pytest import create_data_labeling_job_active_learning_sample @@ -34,45 +33,10 @@ ANNOTATION_SPEC = "rose" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def job_client(): - client_options = {"api_endpoint": API_ENDPOINT} - job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) - yield job_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(capsys, shared_state, job_client): +def teardown(teardown_data_labeling_job): yield - job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"]) - - # Verify Data Labelling Job is cancelled, or timeout after 400 seconds - helpers.wait_for_job_state( - get_job_method=job_client.get_data_labeling_job, - name=shared_state["data_labeling_job_name"], - timeout=400, - freq=10, - ) - - # Delete the data labeling job - response = job_client.delete_data_labeling_job( - name=shared_state["data_labeling_job_name"] - ) - - print("Delete LRO:", response.operation.name) - delete_data_labeling_job_response = response.result(timeout=300) - print("delete_data_labeling_job_response", delete_data_labeling_job_response) - - out, _ = capsys.readouterr() - assert "delete_data_labeling_job_response" in out - # Creating a data labeling job for images def test_create_data_labeling_job_active_learning_sample(capsys, shared_state): diff --git a/samples/snippets/create_data_labeling_job_image_segmentation_test.py b/samples/snippets/create_data_labeling_job_image_segmentation_sample_test.py similarity index 65% rename from samples/snippets/create_data_labeling_job_image_segmentation_test.py rename to samples/snippets/create_data_labeling_job_image_segmentation_sample_test.py index 5ed031e11b..79ec63f1e9 100644 --- 
a/samples/snippets/create_data_labeling_job_image_segmentation_test.py +++ b/samples/snippets/create_data_labeling_job_image_segmentation_sample_test.py @@ -15,7 +15,6 @@ import os import uuid -from google.cloud import aiplatform import pytest import create_data_labeling_job_image_segmentation_sample @@ -35,45 +34,10 @@ ANNOTATION_SET_NAME = f"temp_image_segmentation_{uuid.uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def job_client(): - client_options = {"api_endpoint": API_ENDPOINT} - job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) - yield job_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(capsys, shared_state, job_client): +def teardown(teardown_data_labeling_job): yield - job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"]) - - # Verify Data Labelling Job is cancelled, or timeout after 400 seconds - helpers.wait_for_job_state( - get_job_method=job_client.get_data_labeling_job, - name=shared_state["data_labeling_job_name"], - timeout=400, - freq=10, - ) - - # Delete the data labeling job - response = job_client.delete_data_labeling_job( - name=shared_state["data_labeling_job_name"] - ) - - print("Delete LRO:", response.operation.name) - delete_data_labeling_job_response = response.result(timeout=300) - print("delete_data_labeling_job_response", delete_data_labeling_job_response) - - out, _ = capsys.readouterr() - assert "delete_data_labeling_job_response" in out - # Creating a data labeling job for images def test_create_data_labeling_job_image_segmentation_sample(capsys, shared_state): diff --git a/samples/snippets/create_data_labeling_job_images_sample_test.py b/samples/snippets/create_data_labeling_job_images_sample_test.py index ae5bdce280..07ecda5d14 100644 --- a/samples/snippets/create_data_labeling_job_images_sample_test.py +++ b/samples/snippets/create_data_labeling_job_images_sample_test.py @@ -15,7 +15,6 @@ import os 
from uuid import uuid4 -from google.cloud import aiplatform import pytest import create_data_labeling_job_images_sample @@ -32,42 +31,10 @@ ANNOTATION_SPEC = "daisy" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(capsys, shared_state): +def teardown(teardown_data_labeling_job): yield - assert "/" in shared_state["data_labeling_job_name"] - - data_labeling_job_id = shared_state["data_labeling_job_name"].split("/")[-1] - - client_options = {"api_endpoint": API_ENDPOINT} - client = aiplatform.gapic.JobServiceClient(client_options=client_options) - - name = client.data_labeling_job_path( - project=PROJECT_ID, location=LOCATION, data_labeling_job=data_labeling_job_id - ) - client.cancel_data_labeling_job(name=name) - - # Verify Data Labelling Job is cancelled, or timeout after 400 seconds - helpers.wait_for_job_state( - get_job_method=client.get_data_labeling_job, name=name, timeout=400, freq=10 - ) - - # Delete the data labeling job - response = client.delete_data_labeling_job(name=name) - print("Delete LRO:", response.operation.name) - delete_data_labeling_job_response = response.result(timeout=300) - print("delete_data_labeling_job_response", delete_data_labeling_job_response) - - out, _ = capsys.readouterr() - assert "delete_data_labeling_job_response" in out - # Creating a data labeling job for images def test_ucaip_generated_create_data_labeling_job_sample(capsys, shared_state): diff --git a/samples/snippets/create_data_labeling_job_sample_test.py b/samples/snippets/create_data_labeling_job_sample_test.py index 220628d8ba..5a7b714685 100644 --- a/samples/snippets/create_data_labeling_job_sample_test.py +++ b/samples/snippets/create_data_labeling_job_sample_test.py @@ -15,7 +15,6 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest import create_data_labeling_job_sample @@ -33,42 +32,10 @@ ANNOTATION_SPEC = "daisy" -@pytest.fixture -def 
shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(capsys, shared_state): +def teardown(teardown_data_labeling_job): yield - assert "/" in shared_state["data_labeling_job_name"] - - data_labeling_job_id = shared_state["data_labeling_job_name"].split("/")[-1] - - client_options = {"api_endpoint": API_ENDPOINT} - client = aiplatform.gapic.JobServiceClient(client_options=client_options) - - name = client.data_labeling_job_path( - project=PROJECT_ID, location=LOCATION, data_labeling_job=data_labeling_job_id - ) - client.cancel_data_labeling_job(name=name) - - # Verify Data Labelling Job is cancelled, or timeout after 400 seconds - helpers.wait_for_job_state( - get_job_method=client.get_data_labeling_job, name=name, timeout=400, freq=10 - ) - - # Delete the data labeling job - response = client.delete_data_labeling_job(name=name) - print("Delete LRO:", response.operation.name) - delete_data_labeling_job_response = response.result(timeout=300) - print("delete_data_labeling_job_response", delete_data_labeling_job_response) - - out, _ = capsys.readouterr() - assert "delete_data_labeling_job_response" in out - # Creating a data labeling job for images def test_ucaip_generated_create_data_labeling_job_sample(capsys, shared_state): diff --git a/samples/snippets/create_data_labeling_job_specialist_pool_test.py b/samples/snippets/create_data_labeling_job_specialist_pool_sample_test.py similarity index 65% rename from samples/snippets/create_data_labeling_job_specialist_pool_test.py rename to samples/snippets/create_data_labeling_job_specialist_pool_sample_test.py index afb49613fb..ae7a70cba4 100644 --- a/samples/snippets/create_data_labeling_job_specialist_pool_test.py +++ b/samples/snippets/create_data_labeling_job_specialist_pool_sample_test.py @@ -15,7 +15,6 @@ import os import uuid -from google.cloud import aiplatform import pytest import create_data_labeling_job_specialist_pool_sample @@ -35,45 +34,10 @@ 
ANNOTATION_SPEC = "rose" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def job_client(): - client_options = {"api_endpoint": API_ENDPOINT} - job_client = aiplatform.gapic.JobServiceClient(client_options=client_options) - yield job_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(capsys, shared_state, job_client): +def teardown(teardown_data_labeling_job): yield - job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"]) - - # Verify Data Labelling Job is cancelled, or timeout after 400 seconds - helpers.wait_for_job_state( - get_job_method=job_client.get_data_labeling_job, - name=shared_state["data_labeling_job_name"], - timeout=400, - freq=10, - ) - - # Delete the data labeling job - response = job_client.delete_data_labeling_job( - name=shared_state["data_labeling_job_name"] - ) - - print("Delete LRO:", response.operation.name) - delete_data_labeling_job_response = response.result(timeout=300) - print("delete_data_labeling_job_response", delete_data_labeling_job_response) - - out, _ = capsys.readouterr() - assert "delete_data_labeling_job_response" in out - # Creating a data labeling job for images def test_create_data_labeling_job_specialist_pool_sample(capsys, shared_state): diff --git a/samples/snippets/create_data_labeling_job_video_sample_test.py b/samples/snippets/create_data_labeling_job_video_sample_test.py index 6d782e4887..53813e4e42 100644 --- a/samples/snippets/create_data_labeling_job_video_sample_test.py +++ b/samples/snippets/create_data_labeling_job_video_sample_test.py @@ -15,7 +15,6 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest import create_data_labeling_job_video_sample @@ -32,39 +31,10 @@ ANNOTATION_SPEC = "cartwheel" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_data_labeling_job): yield - 
assert "/" in shared_state["data_labeling_job_name"] - - data_labeling_job_id = shared_state["data_labeling_job_name"].split("/")[-1] - - client_options = {"api_endpoint": API_ENDPOINT} - client = aiplatform.gapic.JobServiceClient(client_options=client_options) - - name = client.data_labeling_job_path( - project=PROJECT_ID, location=LOCATION, data_labeling_job=data_labeling_job_id - ) - client.cancel_data_labeling_job(name=name) - - # Verify Data Labelling Job is cancelled, or timeout after 400 seconds - helpers.wait_for_job_state( - get_job_method=client.get_data_labeling_job, name=name, timeout=400, freq=10 - ) - - # Delete the data labeling job - response = client.delete_data_labeling_job(name=name) - print("Delete LRO:", response.operation.name) - delete_data_labeling_job_response = response.result(timeout=300) - print("delete_data_labeling_job_response", delete_data_labeling_job_response) - # Creating a data labeling job for images def test_ucaip_generated_create_data_labeling_job_sample(capsys, shared_state): diff --git a/samples/snippets/create_dataset_image_sample_test.py b/samples/snippets/create_dataset_image_sample_test.py index 3d7664a14a..96d9dde089 100644 --- a/samples/snippets/create_dataset_image_sample_test.py +++ b/samples/snippets/create_dataset_image_sample_test.py @@ -18,31 +18,15 @@ import pytest import create_dataset_image_sample -import delete_dataset_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -@pytest.fixture -def shared_state(): - shared_state = {} - yield shared_state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_dataset): yield - assert "/" in shared_state["dataset_name"] - - dataset_id = shared_state["dataset_name"].split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) - def test_ucaip_generated_create_dataset_image(capsys, shared_state): 
create_dataset_image_sample.create_dataset_image_sample( diff --git a/samples/snippets/create_dataset_sample_test.py b/samples/snippets/create_dataset_sample_test.py index d0a674f101..5394395dec 100644 --- a/samples/snippets/create_dataset_sample_test.py +++ b/samples/snippets/create_dataset_sample_test.py @@ -18,7 +18,6 @@ import pytest import create_dataset_sample -import delete_dataset_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -27,25 +26,10 @@ ) -@pytest.fixture -def shared_state(): - shared_state = {} - yield shared_state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_dataset): yield - assert "/" in shared_state["dataset_name"] - - dataset_id = shared_state["dataset_name"].split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) - def test_ucaip_generated_create_dataset_sample_vision(capsys, shared_state): create_dataset_sample.create_dataset_sample( diff --git a/samples/snippets/create_dataset_tabular_bigquery_sample_test.py b/samples/snippets/create_dataset_tabular_bigquery_sample_test.py index 53f03de97f..d8f8dc1993 100644 --- a/samples/snippets/create_dataset_tabular_bigquery_sample_test.py +++ b/samples/snippets/create_dataset_tabular_bigquery_sample_test.py @@ -18,32 +18,16 @@ import pytest import create_dataset_tabular_bigquery_sample -import delete_dataset_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") BIGQUERY_URI = "bq://ucaip-sample-tests.table_test.all_bq_types" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_dataset): yield - assert "/" in shared_state["dataset_name"] - - dataset_id = shared_state["dataset_name"].split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - 
project=PROJECT_ID, dataset_id=dataset_id - ) - def test_ucaip_generated_create_dataset_tabular_bigquery(capsys, shared_state): create_dataset_tabular_bigquery_sample.create_dataset_tabular_bigquery_sample( diff --git a/samples/snippets/create_dataset_tabular_gcs_sample_test.py b/samples/snippets/create_dataset_tabular_gcs_sample_test.py index 98e21c881e..aaf5c9bad3 100644 --- a/samples/snippets/create_dataset_tabular_gcs_sample_test.py +++ b/samples/snippets/create_dataset_tabular_gcs_sample_test.py @@ -18,32 +18,16 @@ import pytest import create_dataset_tabular_gcs_sample -import delete_dataset_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") GCS_URI = "gs://ucaip-sample-resources/iris_1000.csv" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_dataset): yield - assert "/" in shared_state["dataset_name"] - - dataset_id = shared_state["dataset_name"].split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) - def test_ucaip_generated_create_dataset_tabular_gcs(capsys, shared_state): create_dataset_tabular_gcs_sample.create_dataset_tabular_gcs_sample( diff --git a/samples/snippets/create_dataset_video_sample_test.py b/samples/snippets/create_dataset_video_sample_test.py index 1e6f85b6bd..d2c499ae4c 100644 --- a/samples/snippets/create_dataset_video_sample_test.py +++ b/samples/snippets/create_dataset_video_sample_test.py @@ -18,7 +18,6 @@ import pytest import create_dataset_video_sample -import delete_dataset_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -27,25 +26,10 @@ ) -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_dataset): yield - assert "/" in shared_state["dataset_name"] - - 
dataset_id = shared_state["dataset_name"].split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) - def test_ucaip_generated_create_dataset_video_sample_vision(capsys, shared_state): create_dataset_video_sample.create_dataset_video_sample( diff --git a/samples/snippets/create_endpoint_sample_test.py b/samples/snippets/create_endpoint_sample_test.py index 2613a4afd0..5d6c66e510 100644 --- a/samples/snippets/create_endpoint_sample_test.py +++ b/samples/snippets/create_endpoint_sample_test.py @@ -18,29 +18,18 @@ import pytest import create_endpoint_sample -import delete_endpoint_sample import helpers DISPLAY_NAME = f"temp_create_endpoint_test_{uuid4()}" PROJECT = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(shared_state, endpoint_client): yield - endpoint_id = shared_state["endpoint_name"].split("/")[-1] - # Delete the endpoint that was just created - delete_endpoint_sample.delete_endpoint_sample( - project=PROJECT, endpoint_id=endpoint_id - ) + endpoint_client.delete_endpoint(name=shared_state["endpoint_name"]) def test_ucaip_generated_create_endpoint_sample(capsys, shared_state): diff --git a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py index ab8e975fcd..f430fc38ed 100644 --- a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py +++ b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample_test.py @@ -15,16 +15,12 @@ import os import uuid -from google.cloud import aiplatform import pytest import create_hyperparameter_tuning_job_python_package_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") - -API_ENDPOINT = 
"us-central1-aiplatform.googleapis.com" - DISPLAY_NAME = ( f"temp_create_hyperparameter_tuning_job_python_package_test_{uuid.uuid4()}" ) @@ -34,40 +30,10 @@ PYTHON_MODULE = "trainer.hptuning_trainer" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def job_client(): - client_options = {"api_endpoint": API_ENDPOINT} - job_client = aiplatform.gapic.JobServiceClient( - client_options=client_options) - yield job_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, job_client): +def teardown(teardown_hyperparameter_tuning_job): yield - # Cancel the created hyperparameter tuning job - job_client.cancel_hyperparameter_tuning_job( - name=shared_state["hyperparameter_tuning_job_name"] - ) - - # Waiting for hyperparameter tuning job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_hyperparameter_tuning_job, - name=shared_state["hyperparameter_tuning_job_name"], - ) - - # Delete the created hyperparameter tuning job - job_client.delete_hyperparameter_tuning_job( - name=shared_state["hyperparameter_tuning_job_name"] - ) - def test_create_hyperparameter_tuning_job_python_package_sample(capsys, shared_state): diff --git a/samples/snippets/create_hyperparameter_tuning_job_sample_test.py b/samples/snippets/create_hyperparameter_tuning_job_sample_test.py index 78799d7554..ad1f0ae4db 100644 --- a/samples/snippets/create_hyperparameter_tuning_job_sample_test.py +++ b/samples/snippets/create_hyperparameter_tuning_job_sample_test.py @@ -15,52 +15,19 @@ import os import uuid -from google.cloud import aiplatform import pytest -import cancel_hyperparameter_tuning_job_sample import create_hyperparameter_tuning_job_sample -import delete_hyperparameter_tuning_job_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") CONTAINER_IMAGE_URI = "gcr.io/ucaip-test/ucaip-training-test:latest" -@pytest.fixture -def shared_state(): - state = {} - yield state - - 
@pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_hyperparameter_tuning_job): yield - hyperparameter_tuning_job_id = shared_state["hyperparameter_tuning_job_name"].split( - "/" - )[-1] - - # Cancel the created hyperparameter tuning job - cancel_hyperparameter_tuning_job_sample.cancel_hyperparameter_tuning_job_sample( - project=PROJECT_ID, hyperparameter_tuning_job_id=hyperparameter_tuning_job_id - ) - - job_client = aiplatform.gapic.JobServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for hyperparameter tuning job to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=job_client.get_hyperparameter_tuning_job, - name=shared_state["hyperparameter_tuning_job_name"], - ) - - # Delete the created hyperparameter tuning job - delete_hyperparameter_tuning_job_sample.delete_hyperparameter_tuning_job_sample( - project=PROJECT_ID, hyperparameter_tuning_job_id=hyperparameter_tuning_job_id - ) - def test_ucaip_generated_create_hyperparameter_tuning_job(capsys, shared_state): create_hyperparameter_tuning_job_sample.create_hyperparameter_tuning_job_sample( diff --git a/samples/snippets/create_training_pipeline_custom_job_sample_test.py b/samples/snippets/create_training_pipeline_custom_job_sample_test.py index 1d3655c6f3..5a0677f0a1 100644 --- a/samples/snippets/create_training_pipeline_custom_job_sample_test.py +++ b/samples/snippets/create_training_pipeline_custom_job_sample_test.py @@ -15,7 +15,6 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest import create_training_pipeline_custom_job_sample @@ -25,50 +24,21 @@ DISPLAY_NAME = f"temp_create_training_pipeline_custom_job_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def pipeline_client(): - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": 
"us-central1-aiplatform.googleapis.com"} - ) - return pipeline_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, pipeline_client): +def teardown(teardown_training_pipeline): yield - shared_state["training_pipeline_name"].split("/")[-1] - - pipeline_client.cancel_training_pipeline( - name=shared_state["training_pipeline_name"] - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - pipeline_client.delete_training_pipeline( - name=shared_state["training_pipeline_name"] - ) - -def test_ucaip_generated_create_training_pipeline_custom_job_sample(capsys, shared_state): +def test_ucaip_generated_create_training_pipeline_custom_job_sample( + capsys, shared_state +): create_training_pipeline_custom_job_sample.create_training_pipeline_custom_job_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, model_display_name=f"Temp Model for {DISPLAY_NAME}", - container_image_uri='gcr.io/ucaip-sample-tests/mnist-custom-job:latest', - base_output_directory_prefix='gs://ucaip-samples-us-central1/training_pipeline_output' + container_image_uri="gcr.io/ucaip-sample-tests/mnist-custom-job:latest", + base_output_directory_prefix="gs://ucaip-samples-us-central1/training_pipeline_output", ) out, _ = capsys.readouterr() diff --git a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py index 6d75cf249c..82725f3847 100644 --- a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py +++ b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample_test.py @@ -15,57 +15,31 @@ import os import uuid -from google.cloud import aiplatform import pytest import 
create_training_pipeline_custom_training_managed_dataset_sample import helpers -API_ENDPOINT = "us-central1-aiplatform.googleapis.com" - PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -DISPLAY_NAME = f"temp_create_training_pipeline_custom_training_managed_dataset_test_{uuid.uuid4()}" +DISPLAY_NAME = ( + f"temp_create_training_pipeline_custom_training_managed_dataset_test_{uuid.uuid4()}" +) MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}" DATASET_ID = "1084241610289446912" # permanent_50_flowers_dataset ANNOTATION_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/dataset/annotation/image_classification_1.0.0.yaml" -TRAINING_CONTAINER_SPEC_IMAGE_URI = "gcr.io/ucaip-test/custom-container-managed-dataset:latest" +TRAINING_CONTAINER_SPEC_IMAGE_URI = ( + "gcr.io/ucaip-test/custom-container-managed-dataset:latest" +) MODEL_CONTAINER_SPEC_IMAGE_URI = "gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest" BASE_OUTPUT_URI_PREFIX = "gs://ucaip-samples-us-central1/training_pipeline_output/custom_training_managed_dataset" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def pipeline_client(): - client_options = {"api_endpoint": API_ENDPOINT} - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options=client_options - ) - yield pipeline_client - - -@pytest.fixture -def model_client(): - client_options = {"api_endpoint": API_ENDPOINT} - model_client = aiplatform.gapic.ModelServiceClient( - client_options=client_options) - yield model_client - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, model_client, pipeline_client): +def teardown(teardown_training_pipeline): yield - model_client.delete_model(name=shared_state["model_name"]) - pipeline_client.delete_training_pipeline( - name=shared_state["training_pipeline_name"] - ) def test_create_training_pipeline_custom_training_managed_dataset_sample( @@ -83,24 +57,7 @@ def 
test_create_training_pipeline_custom_training_managed_dataset_sample( ) out, _ = capsys.readouterr() + assert "response:" in out # Save resource name of the newly created training pipeline shared_state["training_pipeline_name"] = helpers.get_name(out) - - # Poll until the pipeline succeeds because we want to test the model_upload step as well. - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - expected_state="SUCCEEDED", - timeout=1800, - freq=20, - ) - - training_pipeline = pipeline_client.get_training_pipeline( - name=shared_state["training_pipeline_name"] - ) - - # Check that the model indeed has been uploaded. - assert training_pipeline.model_to_upload.name != "" - - shared_state["model_name"] = training_pipeline.model_to_upload.name diff --git a/samples/snippets/create_training_pipeline_image_classification_sample_test.py b/samples/snippets/create_training_pipeline_image_classification_sample_test.py index 89e8804a92..e6f9eca397 100644 --- a/samples/snippets/create_training_pipeline_image_classification_sample_test.py +++ b/samples/snippets/create_training_pipeline_image_classification_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_image_classification_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -28,38 +25,10 @@ DISPLAY_NAME = f"temp_create_training_pipeline_image_classification_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( 
- project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - def test_ucaip_generated_create_training_pipeline_video_classification_sample( capsys, shared_state diff --git a/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py b/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py index 24f26f33e3..46f4e786ef 100644 --- a/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py +++ b/samples/snippets/create_training_pipeline_image_object_detection_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_image_object_detection_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -28,38 +25,10 @@ DISPLAY_NAME = f"temp_create_training_pipeline_image_obj_detection_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - 
client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - def test_ucaip_generated_create_training_pipeline_image_object_dectection( capsys, shared_state diff --git a/samples/snippets/create_training_pipeline_sample_test.py b/samples/snippets/create_training_pipeline_sample_test.py index 4677004e4e..38771638f9 100644 --- a/samples/snippets/create_training_pipeline_sample_test.py +++ b/samples/snippets/create_training_pipeline_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -29,38 +26,10 @@ TRAINING_DEFINITION_GCS_PATH = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_classification_1.0.0.yaml" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - 
get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - # Training AutoML Vision Model def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state): diff --git a/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py b/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py index fe61a82188..00c64ab1f6 100644 --- a/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py +++ b/samples/snippets/create_training_pipeline_tabular_classification_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_tabular_classification_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -30,39 +27,10 @@ PREDICTION_TYPE = "classification" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options=client_options - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - 
delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state): diff --git a/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py b/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py index ab126f8df9..0209d6087e 100644 --- a/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py +++ b/samples/snippets/create_training_pipeline_tabular_regression_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_tabular_regression_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -30,38 +27,10 @@ PREDICTION_TYPE = "regression" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state): diff --git 
a/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py b/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py index 4f54c0fc51..e7dabbae76 100644 --- a/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py +++ b/samples/snippets/create_training_pipeline_text_entity_extraction_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_text_entity_extraction_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -28,38 +25,10 @@ DISPLAY_NAME = f"temp_create_training_pipeline_ten_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - # Training Text Entity Extraction Model def test_ucaip_generated_create_training_pipeline_text_entity_extraction_sample( diff --git a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py 
b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py index ecaa041cc6..0e2af0f967 100644 --- a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_text_sentiment_analysis_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -28,38 +25,10 @@ DISPLAY_NAME = f"temp_create_training_pipeline_tsa_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - # Training Text Sentiment Analysis Model def test_ucaip_generated_create_training_pipeline_text_sentiment_analysis_sample( diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_test.py b/samples/snippets/create_training_pipeline_video_action_recognition_sample_test.py similarity index 64% rename from 
samples/snippets/create_training_pipeline_video_action_recognition_test.py rename to samples/snippets/create_training_pipeline_video_action_recognition_sample_test.py index cd73df7286..c4fca042dd 100644 --- a/samples/snippets/create_training_pipeline_video_action_recognition_test.py +++ b/samples/snippets/create_training_pipeline_video_action_recognition_sample_test.py @@ -15,7 +15,6 @@ import os import uuid -from google.cloud import aiplatform import pytest import create_training_pipeline_video_action_recognition_sample @@ -31,49 +30,15 @@ ) MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}" MODEL_TYPE = "CLOUD" -API_ENDPOINT = "us-central1-aiplatform.googleapis.com" - - -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def pipeline_client(): - client_options = {"api_endpoint": API_ENDPOINT} - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options=client_options - ) - yield pipeline_client @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, pipeline_client): +def teardown(teardown_training_pipeline): yield - # Stop the training pipeline - pipeline_client.cancel_training_pipeline( - name=shared_state["training_pipeline_name"] - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - pipeline_client.delete_training_pipeline( - name=shared_state["training_pipeline_name"] - ) - # Training AutoML Vision Model -def test_create_training_pipeline_video_action_recognition_sample( - capsys, shared_state -): +def test_create_training_pipeline_video_action_recognition_sample(capsys, shared_state): create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, diff --git 
a/samples/snippets/create_training_pipeline_video_classification_sample_test.py b/samples/snippets/create_training_pipeline_video_classification_sample_test.py index cc0a826967..f3479e1d59 100644 --- a/samples/snippets/create_training_pipeline_video_classification_sample_test.py +++ b/samples/snippets/create_training_pipeline_video_classification_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_video_classification_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -28,38 +25,10 @@ DISPLAY_NAME = f"temp_create_training_pipeline_vcn_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - # Training AutoML Vision Model def test_ucaip_generated_create_training_pipeline_video_classification_sample( diff --git a/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py 
b/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py index fd1788ae7b..634c74bac9 100644 --- a/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py +++ b/samples/snippets/create_training_pipeline_video_object_tracking_sample_test.py @@ -15,12 +15,9 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import cancel_training_pipeline_sample import create_training_pipeline_video_object_tracking_sample -import delete_training_pipeline_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -28,38 +25,10 @@ DISPLAY_NAME = f"temp_create_training_pipeline_vot_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_training_pipeline): yield - training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] - - # Stop the training pipeline - cancel_training_pipeline_sample.cancel_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - - pipeline_client = aiplatform.gapic.PipelineServiceClient( - client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"} - ) - - # Waiting for training pipeline to be in CANCELLED state - helpers.wait_for_job_state( - get_job_method=pipeline_client.get_training_pipeline, - name=shared_state["training_pipeline_name"], - ) - - # Delete the training pipeline - delete_training_pipeline_sample.delete_training_pipeline_sample( - project=PROJECT_ID, training_pipeline_id=training_pipeline_id - ) - # Training AutoML Vision Model def test_ucaip_generated_create_training_pipeline_video_object_tracking_sample( diff --git a/samples/snippets/deploy_model_custom_trained_model_sample_test.py b/samples/snippets/deploy_model_custom_trained_model_sample_test.py index 43cf53cdf7..3b45805a16 100644 --- 
a/samples/snippets/deploy_model_custom_trained_model_sample_test.py +++ b/samples/snippets/deploy_model_custom_trained_model_sample_test.py @@ -15,7 +15,6 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest import deploy_model_custom_trained_model_sample @@ -23,46 +22,32 @@ PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" -PARENT = f"projects/{PROJECT_ID}/locations/{LOCATION}" -DISPLAY_NAME = f"temp_deploy_model_custom_trained_model_test_{uuid4()}" # Resource Name of "permanent_custom_mnist_model" MODEL_NAME = "projects/580378083368/locations/us-central1/models/4992732768149438464" -@pytest.fixture -def shared_state(): - state = {} - yield state - - -@pytest.fixture -def endpoint_client(): - client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} - endpoint_client = aiplatform.gapic.EndpointServiceClient( - client_options=client_options - ) - return endpoint_client +@pytest.fixture(scope="function", autouse=True) +def setup(create_endpoint): + create_endpoint(PROJECT_ID, LOCATION) + yield @pytest.fixture(scope="function", autouse=True) -def setup(shared_state, endpoint_client): - create_endpoint_response = endpoint_client.create_endpoint( - parent=PARENT, endpoint={"display_name": DISPLAY_NAME} - ) - shared_state["endpoint"] = create_endpoint_response.result().name +def teardown(teardown_endpoint): + yield def test_ucaip_generated_deploy_model_custom_trained_model_sample(capsys, shared_state): - assert shared_state["endpoint"] is not None + assert shared_state["endpoint_name"] is not None # Deploy existing image classification model to endpoint deploy_model_custom_trained_model_sample.deploy_model_custom_trained_model_sample( project=PROJECT_ID, model_name=MODEL_NAME, - deployed_model_display_name=DISPLAY_NAME, - endpoint_id=shared_state["endpoint"].split("/")[-1], + deployed_model_display_name=f"temp_deploy_model_test_{uuid4()}", + 
endpoint_id=shared_state["endpoint_name"].split("/")[-1], ) # Store deployed model ID for undeploying @@ -70,19 +55,3 @@ def test_ucaip_generated_deploy_model_custom_trained_model_sample(capsys, shared assert "deploy_model_response" in out shared_state["deployed_model_id"] = helpers.get_name(out=out, key="id") - - -@pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, endpoint_client): - yield - - undeploy_model_operation = endpoint_client.undeploy_model( - deployed_model_id=shared_state["deployed_model_id"], - endpoint=shared_state["endpoint"], - ) - undeploy_model_operation.result() - - # Delete the endpoint - endpoint_client.delete_endpoint( - name=shared_state["endpoint"] - ) diff --git a/samples/snippets/deploy_model_sample_test.py b/samples/snippets/deploy_model_sample_test.py index a3cc96b879..e739d75f2f 100644 --- a/samples/snippets/deploy_model_sample_test.py +++ b/samples/snippets/deploy_model_sample_test.py @@ -15,52 +15,39 @@ import os from uuid import uuid4 -from google.cloud import aiplatform import pytest -import delete_endpoint_sample import deploy_model_sample import helpers PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" -PARENT = f"projects/{PROJECT_ID}/locations/{LOCATION}" -DISPLAY_NAME = f"temp_deploy_model_test_{uuid4()}" # Resource Name of "permanent_50_flowers_new_model" MODEL_NAME = "projects/580378083368/locations/us-central1/models/4190810559500779520" -CLIENT_OPTIONS = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} -@pytest.fixture -def shared_state(): - state = {} - yield state +@pytest.fixture(scope="function", autouse=True) +def setup(create_endpoint): + create_endpoint(PROJECT_ID, LOCATION) + yield @pytest.fixture(scope="function", autouse=True) -def setup(shared_state): - - # Create an temporary endpoint and store resource name - shared_state["endpoint_client"] = aiplatform.gapic.EndpointServiceClient( - client_options=CLIENT_OPTIONS - ) - create_endpoint_response = 
shared_state["endpoint_client"].create_endpoint( - parent=PARENT, endpoint={"display_name": DISPLAY_NAME} - ) - shared_state["endpoint"] = create_endpoint_response.result().name +def teardown(teardown_endpoint): + yield def test_ucaip_generated_deploy_model_sample(capsys, shared_state): - assert shared_state["endpoint"] is not None + assert shared_state["endpoint_name"] is not None # Deploy existing image classification model to endpoint deploy_model_sample.deploy_model_sample( project=PROJECT_ID, model_name=MODEL_NAME, - deployed_model_display_name=DISPLAY_NAME, - endpoint_id=shared_state["endpoint"].split("/")[-1], + deployed_model_display_name=f"temp_deploy_model_test_{uuid4()}", + endpoint_id=shared_state["endpoint_name"].split("/")[-1], ) # Store deployed model ID for undeploying @@ -68,19 +55,3 @@ def test_ucaip_generated_deploy_model_sample(capsys, shared_state): assert "deploy_model_response" in out shared_state["deployed_model_id"] = helpers.get_name(out=out, key="id") - - -@pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): - yield - - undeploy_model_operation = shared_state["endpoint_client"].undeploy_model( - deployed_model_id=shared_state["deployed_model_id"], - endpoint=shared_state["endpoint"], - ) - undeploy_model_operation.result() - - # Delete the endpoint - delete_endpoint_sample.delete_endpoint_sample( - project=PROJECT_ID, endpoint_id=shared_state["endpoint"].split("/")[-1] - ) diff --git a/samples/snippets/export_model_tabular_classification_sample_test.py b/samples/snippets/export_model_tabular_classification_sample_test.py index 4145f7d050..e34bc08410 100644 --- a/samples/snippets/export_model_tabular_classification_sample_test.py +++ b/samples/snippets/export_model_tabular_classification_sample_test.py @@ -15,7 +15,6 @@ import os from uuid import uuid4 -from google.cloud import storage import pytest import export_model_tabular_classification_sample @@ -27,10 +26,9 @@ @pytest.fixture(scope="function", autouse=True) 
-def teardown(): +def teardown(storage_client): yield - storage_client = storage.Client() bucket = storage_client.get_bucket("ucaip-samples-test-output") blobs = bucket.list_blobs(prefix=GCS_PREFIX) for blob in blobs: diff --git a/samples/snippets/export_model_video_action_recognition_test.py b/samples/snippets/export_model_video_action_recognition_sample_test.py similarity index 94% rename from samples/snippets/export_model_video_action_recognition_test.py rename to samples/snippets/export_model_video_action_recognition_sample_test.py index ff7341114d..cee43a2911 100644 --- a/samples/snippets/export_model_video_action_recognition_test.py +++ b/samples/snippets/export_model_video_action_recognition_sample_test.py @@ -14,7 +14,6 @@ import os -from google.cloud import storage import pytest import export_model_video_action_recognition_sample @@ -30,10 +29,9 @@ @pytest.fixture(scope="function", autouse=True) -def teardown(): +def teardown(storage_client): yield - storage_client = storage.Client() bucket = storage_client.get_bucket("ucaip-samples-test-output") blobs = bucket.list_blobs(prefix="tmp/export_model_video_action_recognition_sample") for blob in blobs: diff --git a/samples/snippets/get_model_evaluation_sample_test.py b/samples/snippets/get_model_evaluation_sample_test.py index 57e1b95f63..f0ae8f6042 100644 --- a/samples/snippets/get_model_evaluation_sample_test.py +++ b/samples/snippets/get_model_evaluation_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_evaluation_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_slice_sample_test.py b/samples/snippets/get_model_evaluation_slice_sample_test.py index 3e2cea3e5e..1552c114e8 100644 --- a/samples/snippets/get_model_evaluation_slice_sample_test.py +++ b/samples/snippets/get_model_evaluation_slice_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_evaluation_slice_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") 
diff --git a/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py b/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py index 2d9ddb7356..e25b2921a0 100644 --- a/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py +++ b/samples/snippets/get_model_evaluation_tabular_classification_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_evaluation_tabular_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py b/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py index 2481021cd3..d01fdbf515 100644 --- a/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py +++ b/samples/snippets/get_model_evaluation_tabular_regression_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_evaluation_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_video_action_recognition_test.py b/samples/snippets/get_model_evaluation_video_action_recognition_sample_test.py similarity index 99% rename from samples/snippets/get_model_evaluation_video_action_recognition_test.py rename to samples/snippets/get_model_evaluation_video_action_recognition_sample_test.py index c7218141a6..4d0b5d3674 100644 --- a/samples/snippets/get_model_evaluation_video_action_recognition_test.py +++ b/samples/snippets/get_model_evaluation_video_action_recognition_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_evaluation_video_object_tracking_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_video_classification_sample_test.py b/samples/snippets/get_model_evaluation_video_classification_sample_test.py index 01c0b4cefa..cbd38ad3b6 100644 --- a/samples/snippets/get_model_evaluation_video_classification_sample_test.py +++ 
b/samples/snippets/get_model_evaluation_video_classification_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_evaluation_video_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py b/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py index e924040d42..c74d5ae194 100644 --- a/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py +++ b/samples/snippets/get_model_evaluation_video_object_tracking_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_evaluation_video_object_tracking_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_model_sample_test.py b/samples/snippets/get_model_sample_test.py index 8c3afb86d9..c2e94f6dd0 100644 --- a/samples/snippets/get_model_sample_test.py +++ b/samples/snippets/get_model_sample_test.py @@ -14,7 +14,6 @@ import os - import get_model_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/get_training_pipeline_sample_test.py b/samples/snippets/get_training_pipeline_sample_test.py index 4c3726431c..d16972c048 100644 --- a/samples/snippets/get_training_pipeline_sample_test.py +++ b/samples/snippets/get_training_pipeline_sample_test.py @@ -14,7 +14,6 @@ import os - import get_training_pipeline_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") diff --git a/samples/snippets/import_data_text_classification_single_label_sample_test.py b/samples/snippets/import_data_text_classification_single_label_sample_test.py deleted file mode 100644 index afcc0786cf..0000000000 --- a/samples/snippets/import_data_text_classification_single_label_sample_test.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from unittest.mock import MagicMock, mock_open, patch - - -import import_data_text_classification_single_label_sample - -# Test to assert that the import data function was called. We assert that the function was called -# rather than wait for this LRO to complete - - -def test_ucaip_generated_import_data_text_classification_single_label_sample(): - response = MagicMock() - response.next_page_token = b"" - rpc = MagicMock(return_value=response) - - mock_channel = MagicMock() - mock_channel.unary_unary = MagicMock(return_value=rpc) - - with patch( - "google.api_core.grpc_helpers.create_channel", return_value=mock_channel - ), patch("time.sleep"), patch("builtins.open", mock_open(read_data=b"")): - import_data_text_classification_single_label_sample.import_data_text_classification_single_label_sample( - gcs_source_uri="GCS_SOURCE_URI", project="PROJECT", dataset_id="DATASET_ID" - ) - - rpc.assert_called() diff --git a/samples/snippets/import_data_text_entity_extraction_sample_test.py b/samples/snippets/import_data_text_entity_extraction_sample_test.py index 98c5df48f9..378dac434a 100644 --- a/samples/snippets/import_data_text_entity_extraction_sample_test.py +++ b/samples/snippets/import_data_text_entity_extraction_sample_test.py @@ -1,18 +1,9 @@ import os -from uuid import uuid4 -from google.cloud import aiplatform import pytest -import delete_dataset_sample import import_data_text_entity_extraction_sample -print( - f"uCAIP Library Source:\t{aiplatform.__file__}" -) # Package source location sanity check -print( - f"uCAIP Import 
Source:\t{import_data_text_entity_extraction_sample.__file__}" -) # Package source location sanity check PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" @@ -23,35 +14,20 @@ @pytest.fixture(scope="function", autouse=True) -def dataset_name(): - client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} - client = aiplatform.gapic.DatasetServiceClient(client_options=client_options) +def setup(create_dataset): + create_dataset(PROJECT_ID, LOCATION, METADATA_SCHEMA_URI) + yield - dataset = aiplatform.gapic.Dataset( - display_name=f"temp_import_dataset_test_{uuid4()}", - metadata_schema_uri=METADATA_SCHEMA_URI, - ) - - operation = client.create_dataset( - parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset - ) - - created_dataset = operation.result() - yield created_dataset.name - - dataset_id = created_dataset.name.split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_dataset): + yield def test_ucaip_generated_import_data_text_entity_extraction_sample( - capsys, dataset_name + capsys, shared_state ): - dataset_id = dataset_name.split("/")[-1] + dataset_id = shared_state["dataset_name"].split("/")[-1] import_data_text_entity_extraction_sample.import_data_text_entity_extraction_sample( gcs_source_uri=GCS_SOURCE, project=PROJECT_ID, dataset_id=dataset_id diff --git a/samples/snippets/import_data_text_sentiment_analysis_sample_test.py b/samples/snippets/import_data_text_sentiment_analysis_sample_test.py index 76f8512475..2bca6f4779 100644 --- a/samples/snippets/import_data_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/import_data_text_sentiment_analysis_sample_test.py @@ -1,18 +1,9 @@ import os -from uuid import uuid4 -from google.cloud import aiplatform import pytest -import delete_dataset_sample import 
import_data_text_sentiment_analysis_sample -print( - f"uCAIP Library Source:\t{aiplatform.__file__}" -) # Package source location sanity check -print( - f"uCAIP Import Source:\t{import_data_text_sentiment_analysis_sample.__file__}" -) # Package source location sanity check PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" @@ -23,35 +14,20 @@ @pytest.fixture(scope="function", autouse=True) -def dataset_name(): - client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} - client = aiplatform.gapic.DatasetServiceClient(client_options=client_options) +def setup(create_dataset): + create_dataset(PROJECT_ID, LOCATION, METADATA_SCHEMA_URI) + yield - dataset = aiplatform.gapic.Dataset( - display_name=f"temp_import_dataset_test_{uuid4()}", - metadata_schema_uri=METADATA_SCHEMA_URI, - ) - - operation = client.create_dataset( - parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset - ) - - created_dataset = operation.result(timeout=600) - yield created_dataset.name - - dataset_id = created_dataset.name.split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_dataset): + yield def test_ucaip_generated_import_data_text_sentiment_analysis_sample( - capsys, dataset_name + capsys, shared_state ): - dataset_id = dataset_name.split("/")[-1] + dataset_id = shared_state["dataset_name"].split("/")[-1] import_data_text_sentiment_analysis_sample.import_data_text_sentiment_analysis_sample( gcs_source_uri=GCS_SOURCE, project=PROJECT_ID, dataset_id=dataset_id diff --git a/samples/snippets/import_data_video_action_recognition_sample_test.py b/samples/snippets/import_data_video_action_recognition_sample_test.py new file mode 100644 index 0000000000..8f4199c6a0 --- /dev/null +++ b/samples/snippets/import_data_video_action_recognition_sample_test.py @@ -0,0 +1,51 @@ +# 
Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os + +import pytest + +import import_data_video_action_recognition_sample + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +GCS_SOURCE = "gs://automl-video-demo-data/ucaip-var/swimrun.jsonl" +METADATA_SCHEMA_URI = ( + "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml" +) + + +@pytest.fixture(scope="function", autouse=True) +def setup(create_dataset): + create_dataset(PROJECT_ID, LOCATION, METADATA_SCHEMA_URI) + yield + + +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_dataset): + yield + + +def test_import_data_video_action_recognition_sample( + capsys, shared_state, dataset_client +): + dataset_id = shared_state["dataset_name"].split("/")[-1] + + import_data_video_action_recognition_sample.import_data_video_action_recognition_sample( + project=PROJECT_ID, dataset_id=dataset_id, gcs_source_uri=GCS_SOURCE, + ) + out, _ = capsys.readouterr() + + assert "import_data_response" in out diff --git a/samples/snippets/import_data_video_action_recognition_test.py b/samples/snippets/import_data_video_action_recognition_test.py deleted file mode 100644 index f8912bfa7a..0000000000 --- a/samples/snippets/import_data_video_action_recognition_test.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in 
compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -import uuid - -from google.cloud import aiplatform -import pytest - -import import_data_video_action_recognition_sample - -PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") -LOCATION = "us-central1" -GCS_SOURCE = "gs://automl-video-demo-data/ucaip-var/swimrun.jsonl" -METADATA_SCHEMA_URI = ( - "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml" -) - -API_ENDPOINT = "us-central1-aiplatform.googleapis.com" -DISPLAY_NAME = f"temp_import_data_video_action_recognition_test_{uuid.uuid4()}" - - -@pytest.fixture -def shared_state(): - shared_state = {} - yield shared_state - - -@pytest.fixture -def dataset_client(): - client_options = {"api_endpoint": API_ENDPOINT} - dataset_client = aiplatform.gapic.DatasetServiceClient( - client_options=client_options - ) - yield dataset_client - - -@pytest.fixture(scope="function", autouse=True) -def teardown(shared_state, dataset_client): - - yield - dataset_name = dataset_client.dataset_path( - project=PROJECT_ID, location=LOCATION, dataset=shared_state["dataset_id"] - ) - response = dataset_client.delete_dataset(name=dataset_name) - response.result(timeout=120) - - -def test_import_data_video_action_recognition_sample( - capsys, shared_state, dataset_client -): - - dataset = aiplatform.gapic.Dataset( - display_name=DISPLAY_NAME, metadata_schema_uri=METADATA_SCHEMA_URI, - ) - - response = dataset_client.create_dataset( - parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset - ) - - create_dataset_response = response.result(timeout=600) - - 
shared_state["dataset_name"] = create_dataset_response.name - shared_state["dataset_id"] = create_dataset_response.name.split("/")[-1] - - import_data_video_action_recognition_sample.import_data_video_action_recognition_sample( - project=PROJECT_ID, - dataset_id=shared_state["dataset_id"], - gcs_source_uri=GCS_SOURCE, - ) - out, _ = capsys.readouterr() - - assert "import_data_response" in out diff --git a/samples/snippets/import_data_video_classification_sample_test.py b/samples/snippets/import_data_video_classification_sample_test.py index b9d0cea46d..089b9a8b5b 100644 --- a/samples/snippets/import_data_video_classification_sample_test.py +++ b/samples/snippets/import_data_video_classification_sample_test.py @@ -14,12 +14,9 @@ import os -from uuid import uuid4 -from google.cloud import aiplatform import pytest -import delete_dataset_sample import import_data_video_classification_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -31,35 +28,20 @@ @pytest.fixture(scope="function", autouse=True) -def dataset_name(): - client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} - client = aiplatform.gapic.DatasetServiceClient(client_options=client_options) +def setup(create_dataset): + create_dataset(PROJECT_ID, LOCATION, METADATA_SCHEMA_URI) + yield - dataset = aiplatform.gapic.Dataset( - display_name=f"temp_import_dataset_test_{uuid4()}", - metadata_schema_uri=METADATA_SCHEMA_URI, - ) - - operation = client.create_dataset( - parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset - ) - - created_dataset = operation.result(timeout=120) - yield created_dataset.name - - dataset_id = created_dataset.name.split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_dataset): + yield def test_ucaip_generated_import_data_video_classification_sample_single_label_image( - capsys, 
dataset_name + capsys, shared_state ): - dataset_id = dataset_name.split("/")[-1] + dataset_id = shared_state["dataset_name"].split("/")[-1] import_data_video_classification_sample.import_data_video_classification_sample( project=PROJECT_ID, dataset_id=dataset_id, gcs_source_uri=GCS_SOURCE, diff --git a/samples/snippets/import_data_video_object_tracking_sample_test.py b/samples/snippets/import_data_video_object_tracking_sample_test.py index 22b2e61710..6ba16a9c43 100644 --- a/samples/snippets/import_data_video_object_tracking_sample_test.py +++ b/samples/snippets/import_data_video_object_tracking_sample_test.py @@ -14,12 +14,9 @@ import os -from uuid import uuid4 -from google.cloud import aiplatform import pytest -import delete_dataset_sample import import_data_video_object_tracking_sample PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") @@ -31,35 +28,20 @@ @pytest.fixture(scope="function", autouse=True) -def dataset_name(): - client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} - client = aiplatform.gapic.DatasetServiceClient(client_options=client_options) +def setup(create_dataset): + create_dataset(PROJECT_ID, LOCATION, METADATA_SCHEMA_URI) + yield - dataset = aiplatform.gapic.Dataset( - display_name=f"temp_import_dataset_test_{uuid4()}", - metadata_schema_uri=METADATA_SCHEMA_URI, - ) - - operation = client.create_dataset( - parent=f"projects/{PROJECT_ID}/locations/{LOCATION}", dataset=dataset - ) - - created_dataset = operation.result(timeout=120) - yield created_dataset.name - - dataset_id = created_dataset.name.split("/")[-1] - - # Delete the created dataset - delete_dataset_sample.delete_dataset_sample( - project=PROJECT_ID, dataset_id=dataset_id - ) +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_dataset): + yield def test_ucaip_generated_import_data_video_object_tracking_sample_single_label_image( - capsys, dataset_name + capsys, shared_state ): - dataset_id = dataset_name.split("/")[-1] + dataset_id = 
shared_state["dataset_name"].split("/")[-1] import_data_video_object_tracking_sample.import_data_video_object_tracking_sample( project=PROJECT_ID, dataset_id=dataset_id, gcs_source_uri=GCS_SOURCE, diff --git a/samples/snippets/upload_model_sample_test.py b/samples/snippets/upload_model_sample_test.py index 78943f666e..8a38605455 100644 --- a/samples/snippets/upload_model_sample_test.py +++ b/samples/snippets/upload_model_sample_test.py @@ -17,7 +17,6 @@ import pytest -import delete_model_sample import helpers import upload_model_sample @@ -27,20 +26,10 @@ DISPLAY_NAME = f"temp_upload_model_test_{uuid4()}" -@pytest.fixture -def shared_state(): - state = {} - yield state - - @pytest.fixture(scope="function", autouse=True) -def teardown(shared_state): +def teardown(teardown_model): yield - model_id = shared_state["model_name"].split("/")[-1] - - delete_model_sample.delete_model_sample(project=PROJECT_ID, model_id=model_id) - def test_ucaip_generated_upload_model_sample(capsys, shared_state): From 5cf38596d115da63cdddc8958b6ae8f455bdb9a6 Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Thu, 10 Dec 2020 13:56:35 -0800 Subject: [PATCH 20/34] feat: xai samples (#83) --- samples/snippets/explain_tabular_sample.py | 68 +++++++++++++++ .../snippets/explain_tabular_sample_test.py | 37 ++++++++ ..._explain_image_managed_container_sample.py | 83 ++++++++++++++++++ ...ain_image_managed_container_sample_test.py | 52 +++++++++++ ...xplain_tabular_managed_container_sample.py | 87 +++++++++++++++++++ ...n_tabular_managed_container_sample_test.py | 54 ++++++++++++ 6 files changed, 381 insertions(+) create mode 100644 samples/snippets/explain_tabular_sample.py create mode 100644 samples/snippets/explain_tabular_sample_test.py create mode 100644 samples/snippets/upload_model_explain_image_managed_container_sample.py create mode 100644 samples/snippets/upload_model_explain_image_managed_container_sample_test.py create mode 100644 
samples/snippets/upload_model_explain_tabular_managed_container_sample.py create mode 100644 samples/snippets/upload_model_explain_tabular_managed_container_sample_test.py diff --git a/samples/snippets/explain_tabular_sample.py b/samples/snippets/explain_tabular_sample.py new file mode 100644 index 0000000000..c0997d77b6 --- /dev/null +++ b/samples/snippets/explain_tabular_sample.py @@ -0,0 +1,68 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_explain_tabular_sample] +from typing import Dict + +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def explain_tabular_sample( + project: str, + endpoint_id: str, + instance_dict: Dict, + location: str = "us-central1", + api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.PredictionServiceClient(client_options=client_options) + # The format of each instance should conform to the deployed model's prediction input schema. 
+ instance = json_format.ParseDict(instance_dict, Value()) + instances = [instance] + # tabular models do not have additional parameters + parameters_dict = {} + parameters = json_format.ParseDict(parameters_dict, Value()) + endpoint = client.endpoint_path( + project=project, location=location, endpoint=endpoint_id + ) + response = client.explain( + endpoint=endpoint, instances=instances, parameters=parameters + ) + print("response") + print(" deployed_model_id:", response.deployed_model_id) + explanations = response.explanations + for explanation in explanations: + print(" explanation") + # Feature attributions. + attributions = explanation.attributions + for attribution in attributions: + print(" attribution") + print(" baseline_output_value:", attribution.baseline_output_value) + print(" instance_output_value:", attribution.instance_output_value) + print(" output_display_name:", attribution.output_display_name) + print(" approximation_error:", attribution.approximation_error) + print(" output_name:", attribution.output_name) + output_index = attribution.output_index + for output_index in output_index: + print(" output_index:", output_index) + predictions = response.predictions + for prediction in predictions: + print(" prediction:", dict(prediction)) + + +# [END aiplatform_explain_tabular_sample] diff --git a/samples/snippets/explain_tabular_sample_test.py b/samples/snippets/explain_tabular_sample_test.py new file mode 100644 index 0000000000..910107ac37 --- /dev/null +++ b/samples/snippets/explain_tabular_sample_test.py @@ -0,0 +1,37 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import explain_tabular_sample + +ENDPOINT_ID = "4966625964059525120" # iris 1000 +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") + +INSTANCE = { + "petal_length": "1.4", + "petal_width": "1.3", + "sepal_length": "5.1", + "sepal_width": "2.8", +} + + +def test_ucaip_generated_explain_tabular_sample(capsys): + + explain_tabular_sample.explain_tabular_sample( + instance_dict=INSTANCE, project=PROJECT_ID, endpoint_id=ENDPOINT_ID + ) + + out, _ = capsys.readouterr() + assert 'attribution' in out diff --git a/samples/snippets/upload_model_explain_image_managed_container_sample.py b/samples/snippets/upload_model_explain_image_managed_container_sample.py new file mode 100644 index 0000000000..0b5f46533a --- /dev/null +++ b/samples/snippets/upload_model_explain_image_managed_container_sample.py @@ -0,0 +1,83 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_upload_model_explain_image_managed_container_sample] +from google.cloud import aiplatform + + +def upload_model_explain_image_managed_container_sample( + project: str, + display_name: str, + container_spec_image_uri: str, + artifact_uri: str, + input_tensor_name: str, + output_tensor_name: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", + timeout: int = 300, +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.ModelServiceClient(client_options=client_options) + + # Container specification for deploying the model + container_spec = {"image_uri": container_spec_image_uri, "command": [], "args": []} + + # The explainabilty method and corresponding parameters + parameters = aiplatform.gapic.ExplanationParameters( + {"xrai_attribution": {"step_count": 1}} + ) + + # The input tensor for feature attribution to the output + # For single input model, y = f(x), this will be the serving input layer. + input_metadata = aiplatform.gapic.ExplanationMetadata.InputMetadata( + { + "input_tensor_name": input_tensor_name, + # Input is image data + "modality": "image", + } + ) + + # The output tensor to explain + # For single output model, y = f(x), this will be the serving output layer. 
+ output_metadata = aiplatform.gapic.ExplanationMetadata.OutputMetadata( + {"output_tensor_name": output_tensor_name} + ) + + # Assemble the explanation metadata + metadata = aiplatform.gapic.ExplanationMetadata( + inputs={"image": input_metadata}, outputs={"prediction": output_metadata} + ) + + # Assemble the explanation specification + explanation_spec = aiplatform.gapic.ExplanationSpec( + parameters=parameters, metadata=metadata + ) + + model = aiplatform.gapic.Model( + display_name=display_name, + # The Cloud Storage location of the custom model + artifact_uri=artifact_uri, + explanation_spec=explanation_spec, + container_spec=container_spec, + ) + parent = f"projects/{project}/locations/{location}" + response = client.upload_model(parent=parent, model=model) + print("Long running operation:", response.operation.name) + upload_model_response = response.result(timeout=timeout) + print("upload_model_response:", upload_model_response) + + +# [END aiplatform_upload_model_explain_image_managed_container_sample] diff --git a/samples/snippets/upload_model_explain_image_managed_container_sample_test.py b/samples/snippets/upload_model_explain_image_managed_container_sample_test.py new file mode 100644 index 0000000000..e93b9348ec --- /dev/null +++ b/samples/snippets/upload_model_explain_image_managed_container_sample_test.py @@ -0,0 +1,52 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +from uuid import uuid4 + +import pytest + +import helpers + +import upload_model_explain_image_managed_container_sample + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +IMAGE_URI = "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest" +ARTIFACT_URI = "gs://ucaip-samples-us-central1/model/cifar" +DISPLAY_NAME = f"temp_upload_model_explain_image_managed_container_sample_{uuid4()}" + +INPUT_TENSOR_NAME = "bytes_inputs" +OUTPUT_TENSOR_NAME = "output_0" + + +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_model): + yield + + +def test_ucaip_generated_upload_model_explain_image_managed_container_sample(capsys, shared_state): + + upload_model_explain_image_managed_container_sample.upload_model_explain_image_managed_container_sample( + display_name=DISPLAY_NAME, + artifact_uri=ARTIFACT_URI, + container_spec_image_uri=IMAGE_URI, + project=PROJECT_ID, + input_tensor_name=INPUT_TENSOR_NAME, + output_tensor_name=OUTPUT_TENSOR_NAME + ) + + out, _ = capsys.readouterr() + + shared_state["model_name"] = helpers.get_name(out, key="model") diff --git a/samples/snippets/upload_model_explain_tabular_managed_container_sample.py b/samples/snippets/upload_model_explain_tabular_managed_container_sample.py new file mode 100644 index 0000000000..5d2d56a868 --- /dev/null +++ b/samples/snippets/upload_model_explain_tabular_managed_container_sample.py @@ -0,0 +1,87 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_upload_model_explain_tabular_managed_container_sample] +from google.cloud import aiplatform + + +def upload_model_explain_tabular_managed_container_sample( + project: str, + display_name: str, + container_spec_image_uri: str, + artifact_uri: str, + input_tensor_name: str, + output_tensor_name: str, + feature_names: list, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", + timeout: int = 300, +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.ModelServiceClient(client_options=client_options) + + # Container specification for deploying the model + container_spec = {"image_uri": container_spec_image_uri, "command": [], "args": []} + + # The explainabilty method and corresponding parameters + parameters = aiplatform.gapic.ExplanationParameters( + {"xrai_attribution": {"step_count": 1}} + ) + + # The input tensor for feature attribution to the output + # For single input model, y = f(x), this will be the serving input layer. + input_metadata = aiplatform.gapic.ExplanationMetadata.InputMetadata( + { + "input_tensor_name": input_tensor_name, + # Input is tabular data + "modality": "numeric", + # Assign feature names to the inputs for explanation + "encoding": "BAG_OF_FEATURES", + "index_feature_mapping": feature_names, + } + ) + + # The output tensor to explain + # For single output model, y = f(x), this will be the serving output layer. 
+ output_metadata = aiplatform.gapic.ExplanationMetadata.OutputMetadata( + {"output_tensor_name": output_tensor_name} + ) + + # Assemble the explanation metadata + metadata = aiplatform.gapic.ExplanationMetadata( + inputs={"features": input_metadata}, outputs={"prediction": output_metadata} + ) + + # Assemble the explanation specification + explanation_spec = aiplatform.gapic.ExplanationSpec( + parameters=parameters, metadata=metadata + ) + + model = aiplatform.gapic.Model( + display_name=display_name, + # The Cloud Storage location of the custom model + artifact_uri=artifact_uri, + explanation_spec=explanation_spec, + container_spec=container_spec, + ) + parent = f"projects/{project}/locations/{location}" + response = client.upload_model(parent=parent, model=model) + print("Long running operation:", response.operation.name) + upload_model_response = response.result(timeout=timeout) + print("upload_model_response:", upload_model_response) + + +# [END aiplatform_upload_model_explain_tabular_managed_container_sample] diff --git a/samples/snippets/upload_model_explain_tabular_managed_container_sample_test.py b/samples/snippets/upload_model_explain_tabular_managed_container_sample_test.py new file mode 100644 index 0000000000..581a11e5b4 --- /dev/null +++ b/samples/snippets/upload_model_explain_tabular_managed_container_sample_test.py @@ -0,0 +1,54 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+
+from uuid import uuid4
+
+import pytest
+
+import helpers
+
+import upload_model_explain_tabular_managed_container_sample
+
+PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
+IMAGE_URI = "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest"
+ARTIFACT_URI = "gs://ucaip-samples-us-central1/model/boston_housing/"
+DISPLAY_NAME = f"temp_upload_model_test_{uuid4()}"
+
+INPUT_TENSOR_NAME = "dense_input"
+OUTPUT_TENSOR_NAME = "dense_2"
+
+
+@pytest.fixture(scope="function", autouse=True)
+def teardown(teardown_model):
+    yield
+
+
+def test_ucaip_generated_upload_model_explain_tabular_managed_container_sample(capsys, shared_state):
+
+    upload_model_explain_tabular_managed_container_sample.upload_model_explain_tabular_managed_container_sample(
+        display_name=DISPLAY_NAME,
+        artifact_uri=ARTIFACT_URI,
+        container_spec_image_uri=IMAGE_URI,
+        project=PROJECT_ID,
+        input_tensor_name=INPUT_TENSOR_NAME,
+        output_tensor_name=OUTPUT_TENSOR_NAME,
+        feature_names=["crim", "zn", "indus", "chas", "nox", "rm", "age",
+            "dis", "rad", "tax", "ptratio", "b", "lstat"]
+    )
+
+    out, _ = capsys.readouterr()
+
+    shared_state["model_name"] = helpers.get_name(out, key="model")
From fe40777fde10b7e9af8633d14eb2c8b9382653b6 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Tue, 15 Dec 2020 12:52:37 -0800
Subject: [PATCH 21/34] chore: update create batch prediction job samples
 (#133)

---
 ...create_batch_prediction_job_text_classification_sample.py | 4 +++-
 ...ate_batch_prediction_job_text_entity_extraction_sample.py | 4 +++-
 ...te_batch_prediction_job_text_sentiment_analysis_sample.py | 4 +++-
 ...create_batch_prediction_job_text_classification_sample.py | 5 ++++-
 ...ate_batch_prediction_job_text_entity_extraction_sample.py | 5 ++++-
 ...te_batch_prediction_job_text_sentiment_analysis_sample.py | 5 ++++-
 6 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py 
b/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py index 0cac68a867..f4447f9d1a 100644 --- a/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py +++ b/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py @@ -24,12 +24,14 @@ def make_batch_prediction_job( gcs_source_uri: str, gcs_destination_output_uri_prefix: str, ) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: + model_parameters_dict = {} + model_parameters = to_protobuf_value(model_parameters_dict) batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' "model": model, - "model_parameters": Value(), + "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", "gcs_source": {"uris": [gcs_source_uri]}, diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py index 0cac68a867..f4447f9d1a 100644 --- a/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py +++ b/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py @@ -24,12 +24,14 @@ def make_batch_prediction_job( gcs_source_uri: str, gcs_destination_output_uri_prefix: str, ) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: + model_parameters_dict = {} + model_parameters = to_protobuf_value(model_parameters_dict) batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' "model": model, - "model_parameters": Value(), + "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", "gcs_source": {"uris": [gcs_source_uri]}, diff --git 
a/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py index 0cac68a867..f4447f9d1a 100644 --- a/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py +++ b/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py @@ -24,12 +24,14 @@ def make_batch_prediction_job( gcs_source_uri: str, gcs_destination_output_uri_prefix: str, ) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: + model_parameters_dict = {} + model_parameters = to_protobuf_value(model_parameters_dict) batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' "model": model, - "model_parameters": Value(), + "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", "gcs_source": {"uris": [gcs_source_uri]}, diff --git a/samples/snippets/create_batch_prediction_job_text_classification_sample.py b/samples/snippets/create_batch_prediction_job_text_classification_sample.py index a9a9ad6e67..636db6faa2 100644 --- a/samples/snippets/create_batch_prediction_job_text_classification_sample.py +++ b/samples/snippets/create_batch_prediction_job_text_classification_sample.py @@ -14,6 +14,7 @@ # [START aiplatform_create_batch_prediction_job_text_classification_sample] from google.cloud import aiplatform +from google.protobuf import json_format from google.protobuf.struct_pb2 import Value @@ -30,12 +31,14 @@ def create_batch_prediction_job_text_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.JobServiceClient(client_options=client_options) + model_parameters_dict = {} + model_parameters = json_format.ParseDict(model_parameters_dict, Value()) batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' "model": model, - "model_parameters": Value(), + "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", "gcs_source": {"uris": [gcs_source_uri]}, diff --git a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py index ec950613a0..e12d54cf65 100644 --- a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py +++ b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py @@ -14,6 +14,7 @@ # [START aiplatform_create_batch_prediction_job_text_entity_extraction_sample] from google.cloud import aiplatform +from google.protobuf import json_format from google.protobuf.struct_pb2 import Value @@ -30,12 +31,14 @@ def create_batch_prediction_job_text_entity_extraction_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.JobServiceClient(client_options=client_options) + model_parameters_dict = {} + model_parameters = json_format.ParseDict(model_parameters_dict, Value()) batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' "model": model, - "model_parameters": Value(), + "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", "gcs_source": {"uris": [gcs_source_uri]}, diff --git a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py index 22bd7a31c4..36b2af5cf8 100644 --- a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py +++ b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py @@ -14,6 +14,7 @@ # [START aiplatform_create_batch_prediction_job_text_sentiment_analysis_sample] from google.cloud import aiplatform +from google.protobuf import json_format from google.protobuf.struct_pb2 import Value @@ -30,12 +31,14 @@ def create_batch_prediction_job_text_sentiment_analysis_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.JobServiceClient(client_options=client_options) + model_parameters_dict = {} + model_parameters = json_format.ParseDict(model_parameters_dict, Value()) batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' "model": model, - "model_parameters": Value(), + "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", "gcs_source": {"uris": [gcs_source_uri]}, From 07ae55a51a6b3fa6191da31d4b504a54d20d4d13 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 15 Dec 2020 22:33:57 +0100 Subject: [PATCH 22/34] chore(deps): update dependency pytest to v6.2.1 (#130) --- samples/snippets/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/requirements.txt b/samples/snippets/requirements.txt index e9b3e8a556..696d57eefe 100644 --- a/samples/snippets/requirements.txt +++ b/samples/snippets/requirements.txt @@ -1,3 +1,3 @@ -pytest==6.1.2 +pytest==6.2.1 google-cloud-storage>=1.26.0, <2.0.0dev google-cloud-aiplatform==0.3.1 From 53cdbabdef6bd10488f49d0c3ed6f05149af32a6 Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Tue, 15 Dec 2020 17:03:17 -0800 Subject: [PATCH 23/34] fix: blacken on library, test files (#135) * fix: blacken on library, test files Co-authored-by: Bu Sun Kim --- .../services/dataset_service/transports/base.py | 8 ++++---- .../services/dataset_service/transports/grpc.py | 3 +-- .../services/dataset_service/transports/grpc_asyncio.py | 8 ++++---- .../services/endpoint_service/transports/base.py | 8 ++++---- .../services/endpoint_service/transports/grpc.py | 3 +-- .../services/endpoint_service/transports/grpc_asyncio.py | 8 ++++---- .../services/job_service/transports/base.py | 8 ++++---- .../services/job_service/transports/grpc.py | 3 +-- .../services/job_service/transports/grpc_asyncio.py | 8 ++++---- .../services/migration_service/transports/base.py | 8 ++++---- 
.../services/migration_service/transports/grpc.py | 3 +-- .../services/migration_service/transports/grpc_asyncio.py | 8 ++++---- .../services/model_service/transports/base.py | 8 ++++---- .../services/model_service/transports/grpc.py | 3 +-- .../services/model_service/transports/grpc_asyncio.py | 8 ++++---- .../services/pipeline_service/transports/base.py | 8 ++++---- .../services/pipeline_service/transports/grpc.py | 3 +-- .../services/pipeline_service/transports/grpc_asyncio.py | 8 ++++---- .../services/prediction_service/transports/base.py | 8 ++++---- .../services/prediction_service/transports/grpc.py | 3 +-- .../prediction_service/transports/grpc_asyncio.py | 8 ++++---- .../services/specialist_pool_service/transports/base.py | 8 ++++---- .../services/specialist_pool_service/transports/grpc.py | 3 +-- .../specialist_pool_service/transports/grpc_asyncio.py | 8 ++++---- 24 files changed, 72 insertions(+), 80 deletions(-) diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index 56f567959a..583e9864cd 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -74,10 +74,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
""" # Save the hostname. Default to port 443 (HTTPS) if none is specified. diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py index 2647c4bd9c..7120c2eb9a 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -228,8 +228,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py index 1f22b10f3e..aff766aa24 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py @@ -137,10 +137,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
Raises: diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py index e55589de8f..88b2b17c57 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -73,10 +73,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py index 70915facf0..5a2dee4d5a 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -227,8 +227,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py index f4e362281b..661c63e9b9 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py @@ -136,10 +136,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index 3d1f0be59b..abedda51f9 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -86,10 +86,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index f4b610bd53..859efdd7e7 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -242,8 +242,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py index 83cc826484..7d203c8d18 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py @@ -151,10 +151,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py index cbcb288489..e5feef70cf 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py @@ -71,10 +71,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py index efd4c4b6a4..b73e3936d5 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py @@ -229,8 +229,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py index ba038f57c5..969d1a3b12 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py @@ -138,10 +138,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index 2f87fc98dd..a0b896cdf4 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -75,10 +75,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index 442b665d3a..98f90e9dc8 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -231,8 +231,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index 13e9848290..bce1fed9a6 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -140,10 +140,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index 41123b8615..25e8acb412 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -76,10 +76,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 4fc6389449..fc6ca0087e 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -232,8 +232,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index 2e6f51e1a3..173c771eab 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -141,10 +141,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index 0c82f7d83c..f2f7a028cc 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -69,10 +69,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 1a102e1a61..fbfbabef1b 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -225,8 +225,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index a0785007db..237fa8a75c 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -134,10 +134,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py index f1af058030..a39c2f1f71 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -72,10 +72,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py index 2d1442ae33..2fc7e0881b 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py @@ -233,8 +233,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py index 7d038edc4f..a6d3b045e6 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -142,10 +142,10 @@ def __init__( for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
Raises: From dd8677c87a8e818230f39fc959b613f9ed241e9c Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Wed, 16 Dec 2020 14:52:10 -0800 Subject: [PATCH 24/34] chore: add api_endpoint_comment (#138) * chore: add api_endpoint_comment --- ...create_batch_prediction_job_text_classification_sample.py | 4 ++-- ...ate_batch_prediction_job_text_entity_extraction_sample.py | 4 ++-- ...te_batch_prediction_job_text_sentiment_analysis_sample.py | 4 ++-- ...e_batch_prediction_job_video_action_recognition_sample.py | 4 ++-- .sample_configs/process_configs.yaml | 1 + samples/snippets/cancel_batch_prediction_job_sample.py | 1 + samples/snippets/cancel_custom_job_sample.py | 1 + samples/snippets/cancel_data_labeling_job_sample.py | 1 + samples/snippets/cancel_hyperparameter_tuning_job_sample.py | 1 + samples/snippets/cancel_training_pipeline_sample.py | 1 + .../snippets/create_batch_prediction_job_bigquery_sample.py | 1 + samples/snippets/create_batch_prediction_job_sample.py | 1 + ...create_batch_prediction_job_text_classification_sample.py | 5 +++-- ...e_batch_prediction_job_text_classification_sample_test.py | 2 +- ...ate_batch_prediction_job_text_entity_extraction_sample.py | 5 +++-- ...atch_prediction_job_text_entity_extraction_sample_test.py | 2 +- ...te_batch_prediction_job_text_sentiment_analysis_sample.py | 5 +++-- ...tch_prediction_job_text_sentiment_analysis_sample_test.py | 2 +- ...e_batch_prediction_job_video_action_recognition_sample.py | 5 +++-- ...ch_prediction_job_video_action_recognition_sample_test.py | 2 +- ...reate_batch_prediction_job_video_classification_sample.py | 1 + ...eate_batch_prediction_job_video_object_tracking_sample.py | 1 + samples/snippets/create_custom_job_sample.py | 1 + .../create_data_labeling_job_active_learning_sample.py | 1 + .../create_data_labeling_job_image_segmentation_sample.py | 1 + samples/snippets/create_data_labeling_job_images_sample.py | 1 + samples/snippets/create_data_labeling_job_sample.py | 1 + 
.../create_data_labeling_job_specialist_pool_sample.py | 1 + samples/snippets/create_data_labeling_job_video_sample.py | 1 + samples/snippets/create_dataset_image_sample.py | 1 + samples/snippets/create_dataset_sample.py | 1 + samples/snippets/create_dataset_tabular_bigquery_sample.py | 1 + samples/snippets/create_dataset_tabular_gcs_sample.py | 1 + samples/snippets/create_dataset_text_sample.py | 1 + samples/snippets/create_dataset_video_sample.py | 1 + samples/snippets/create_endpoint_sample.py | 1 + ...create_hyperparameter_tuning_job_python_package_sample.py | 1 + samples/snippets/create_hyperparameter_tuning_job_sample.py | 1 + .../snippets/create_training_pipeline_custom_job_sample.py | 1 + ...aining_pipeline_custom_training_managed_dataset_sample.py | 1 + .../create_training_pipeline_image_classification_sample.py | 1 + ...create_training_pipeline_image_object_detection_sample.py | 1 + samples/snippets/create_training_pipeline_sample.py | 1 + ...create_training_pipeline_tabular_classification_sample.py | 1 + .../create_training_pipeline_tabular_regression_sample.py | 1 + .../create_training_pipeline_text_classification_sample.py | 1 + ...create_training_pipeline_text_entity_extraction_sample.py | 1 + ...reate_training_pipeline_text_sentiment_analysis_sample.py | 1 + ...eate_training_pipeline_video_action_recognition_sample.py | 1 + .../create_training_pipeline_video_classification_sample.py | 1 + .../create_training_pipeline_video_object_tracking_sample.py | 1 + samples/snippets/delete_batch_prediction_job_sample.py | 1 + samples/snippets/delete_custom_job_sample.py | 1 + samples/snippets/delete_data_labeling_job_sample.py | 1 + samples/snippets/delete_dataset_sample.py | 1 + samples/snippets/delete_endpoint_sample.py | 1 + samples/snippets/delete_hyperparameter_tuning_job_sample.py | 1 + samples/snippets/delete_model_sample.py | 1 + samples/snippets/delete_training_pipeline_sample.py | 1 + samples/snippets/deploy_model_custom_trained_model_sample.py | 1 + 
samples/snippets/deploy_model_sample.py | 1 + samples/snippets/explain_tabular_sample.py | 1 + samples/snippets/export_model_sample.py | 1 + .../snippets/export_model_tabular_classification_sample.py | 1 + .../snippets/export_model_video_action_recognition_sample.py | 1 + samples/snippets/get_batch_prediction_job_sample.py | 1 + samples/snippets/get_custom_job_sample.py | 1 + samples/snippets/get_hyperparameter_tuning_job_sample.py | 1 + .../get_model_evaluation_image_classification_sample.py | 1 + .../get_model_evaluation_image_object_detection_sample.py | 1 + samples/snippets/get_model_evaluation_sample.py | 1 + samples/snippets/get_model_evaluation_slice_sample.py | 1 + .../get_model_evaluation_tabular_classification_sample.py | 1 + .../get_model_evaluation_tabular_regression_sample.py | 1 + .../get_model_evaluation_text_classification_sample.py | 1 + .../get_model_evaluation_text_entity_extraction_sample.py | 1 + .../get_model_evaluation_text_sentiment_analysis_sample.py | 1 + .../get_model_evaluation_video_action_recognition_sample.py | 1 + .../get_model_evaluation_video_classification_sample.py | 1 + .../get_model_evaluation_video_object_tracking_sample.py | 1 + samples/snippets/get_model_sample.py | 1 + samples/snippets/get_training_pipeline_sample.py | 1 + .../import_data_image_classification_single_label_sample.py | 1 + .../snippets/import_data_image_object_detection_sample.py | 1 + samples/snippets/import_data_sample.py | 1 + .../import_data_text_classification_single_label_sample.py | 1 + .../snippets/import_data_text_entity_extraction_sample.py | 1 + .../snippets/import_data_text_sentiment_analysis_sample.py | 1 + .../snippets/import_data_video_action_recognition_sample.py | 1 + samples/snippets/import_data_video_classification_sample.py | 1 + samples/snippets/import_data_video_object_tracking_sample.py | 1 + samples/snippets/list_model_evaluation_slices_sample.py | 1 + samples/snippets/predict_custom_trained_model_sample.py | 1 + 
samples/snippets/predict_image_classification_sample.py | 1 + samples/snippets/predict_image_object_detection_sample.py | 1 + samples/snippets/predict_sample.py | 1 + samples/snippets/predict_tabular_classification_sample.py | 1 + samples/snippets/predict_tabular_regression_sample.py | 1 + .../predict_text_classification_single_label_sample.py | 1 + samples/snippets/predict_text_entity_extraction_sample.py | 1 + samples/snippets/predict_text_sentiment_analysis_sample.py | 1 + .../upload_model_explain_image_managed_container_sample.py | 1 + .../upload_model_explain_tabular_managed_container_sample.py | 1 + samples/snippets/upload_model_sample.py | 1 + 104 files changed, 116 insertions(+), 20 deletions(-) diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py index f4447f9d1a..f7ef5e26e2 100644 --- a/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py +++ b/.sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py @@ -20,7 +20,7 @@ def make_parent(parent: str) -> str: def make_batch_prediction_job( display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, ) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: @@ -30,7 +30,7 @@ def make_batch_prediction_job( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py index f4447f9d1a..f7ef5e26e2 100644 --- 
a/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py +++ b/.sample_configs/param_handlers/create_batch_prediction_job_text_entity_extraction_sample.py @@ -20,7 +20,7 @@ def make_parent(parent: str) -> str: def make_batch_prediction_job( display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, ) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: @@ -30,7 +30,7 @@ def make_batch_prediction_job( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py index f4447f9d1a..f7ef5e26e2 100644 --- a/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py +++ b/.sample_configs/param_handlers/create_batch_prediction_job_text_sentiment_analysis_sample.py @@ -20,7 +20,7 @@ def make_parent(parent: str) -> str: def make_batch_prediction_job( display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, ) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: @@ -30,7 +30,7 @@ def make_batch_prediction_job( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_video_action_recognition_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_video_action_recognition_sample.py 
index 56c9412eb3..a9c1af1180 100644 --- a/.sample_configs/param_handlers/create_batch_prediction_job_video_action_recognition_sample.py +++ b/.sample_configs/param_handlers/create_batch_prediction_job_video_action_recognition_sample.py @@ -22,7 +22,7 @@ def make_parent(parent: str) -> str: def make_batch_prediction_job( display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, ) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: @@ -34,7 +34,7 @@ def make_batch_prediction_job( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml index 7eb80f400f..7001e974ac 100644 --- a/.sample_configs/process_configs.yaml +++ b/.sample_configs/process_configs.yaml @@ -1,5 +1,6 @@ global: api_endpoint: us-central1-aiplatform.googleapis.com + api_endpoint_comment: The AI Platform services require regional API endpoints. timeout: 300 skip: - etag diff --git a/samples/snippets/cancel_batch_prediction_job_sample.py b/samples/snippets/cancel_batch_prediction_job_sample.py index 7b7b95a8c3..d9eab6c84e 100644 --- a/samples/snippets/cancel_batch_prediction_job_sample.py +++ b/samples/snippets/cancel_batch_prediction_job_sample.py @@ -22,6 +22,7 @@ def cancel_batch_prediction_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/cancel_custom_job_sample.py b/samples/snippets/cancel_custom_job_sample.py index 237d872a08..4544a6d0b2 100644 --- a/samples/snippets/cancel_custom_job_sample.py +++ b/samples/snippets/cancel_custom_job_sample.py @@ -22,6 +22,7 @@ def cancel_custom_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/cancel_data_labeling_job_sample.py b/samples/snippets/cancel_data_labeling_job_sample.py index 4ad232812b..cf2ae20977 100644 --- a/samples/snippets/cancel_data_labeling_job_sample.py +++ b/samples/snippets/cancel_data_labeling_job_sample.py @@ -22,6 +22,7 @@ def cancel_data_labeling_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/cancel_hyperparameter_tuning_job_sample.py b/samples/snippets/cancel_hyperparameter_tuning_job_sample.py index 120e257c36..7264571160 100644 --- a/samples/snippets/cancel_hyperparameter_tuning_job_sample.py +++ b/samples/snippets/cancel_hyperparameter_tuning_job_sample.py @@ -22,6 +22,7 @@ def cancel_hyperparameter_tuning_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. 
# This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/cancel_training_pipeline_sample.py b/samples/snippets/cancel_training_pipeline_sample.py index 6ecc71bde7..06f7284ba3 100644 --- a/samples/snippets/cancel_training_pipeline_sample.py +++ b/samples/snippets/cancel_training_pipeline_sample.py @@ -22,6 +22,7 @@ def cancel_training_pipeline_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_batch_prediction_job_bigquery_sample.py b/samples/snippets/create_batch_prediction_job_bigquery_sample.py index 7747333cab..e050976cb3 100644 --- a/samples/snippets/create_batch_prediction_job_bigquery_sample.py +++ b/samples/snippets/create_batch_prediction_job_bigquery_sample.py @@ -29,6 +29,7 @@ def create_batch_prediction_job_bigquery_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_batch_prediction_job_sample.py b/samples/snippets/create_batch_prediction_job_sample.py index ea89e7b885..adf62fd941 100644 --- a/samples/snippets/create_batch_prediction_job_sample.py +++ b/samples/snippets/create_batch_prediction_job_sample.py @@ -29,6 +29,7 @@ def create_batch_prediction_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_batch_prediction_job_text_classification_sample.py b/samples/snippets/create_batch_prediction_job_text_classification_sample.py index 636db6faa2..1480eb4aef 100644 --- a/samples/snippets/create_batch_prediction_job_text_classification_sample.py +++ b/samples/snippets/create_batch_prediction_job_text_classification_sample.py @@ -21,12 +21,13 @@ def create_batch_prediction_job_text_classification_sample( project: str, display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
@@ -37,7 +38,7 @@ def create_batch_prediction_job_text_classification_sample( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py index 7f6632e701..2561facac8 100644 --- a/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_classification_sample_test.py @@ -43,7 +43,7 @@ def test_ucaip_generated_create_batch_prediction_tcn_sample(capsys, shared_state create_batch_prediction_job_text_classification_sample.create_batch_prediction_job_text_classification_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, - model=model_name, + model_name=model_name, gcs_source_uri=GCS_SOURCE_URI, gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, ) diff --git a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py index e12d54cf65..186b74b4af 100644 --- a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py +++ b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample.py @@ -21,12 +21,13 @@ def create_batch_prediction_job_text_entity_extraction_sample( project: str, display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. 
# This client only needs to be created once, and can be reused for multiple requests. @@ -37,7 +38,7 @@ def create_batch_prediction_job_text_entity_extraction_sample( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py index 68769c7825..d7e6469715 100644 --- a/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_entity_extraction_sample_test.py @@ -43,7 +43,7 @@ def test_ucaip_generated_create_batch_prediction_ten_sample(capsys, shared_state create_batch_prediction_job_text_entity_extraction_sample.create_batch_prediction_job_text_entity_extraction_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, - model=model_name, + model_name=model_name, gcs_source_uri=GCS_SOURCE_URI, gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, ) diff --git a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py index 36b2af5cf8..e35c545fca 100644 --- a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py +++ b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample.py @@ -21,12 +21,13 @@ def create_batch_prediction_job_text_sentiment_analysis_sample( project: str, display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. @@ -37,7 +38,7 @@ def create_batch_prediction_job_text_sentiment_analysis_sample( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py index ca6cf2b0d8..9695873668 100644 --- a/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_text_sentiment_analysis_sample_test.py @@ -43,7 +43,7 @@ def test_ucaip_generated_create_batch_prediction_tsn_sample(capsys, shared_state create_batch_prediction_job_text_sentiment_analysis_sample.create_batch_prediction_job_text_sentiment_analysis_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, - model=model_name, + model_name=model_name, gcs_source_uri=GCS_SOURCE_URI, gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, ) diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py index e5775d3e36..1d4f3c67ce 100644 --- a/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample.py @@ -21,12 +21,13 @@ def create_batch_prediction_job_video_action_recognition_sample( project: str, display_name: str, - model: str, + model_name: str, gcs_source_uri: str, gcs_destination_output_uri_prefix: str, location: str = "us-central1", api_endpoint: str = 
"us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. @@ -39,7 +40,7 @@ def create_batch_prediction_job_video_action_recognition_sample( batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' - "model": model, + "model": model_name, "model_parameters": model_parameters, "input_config": { "instances_format": "jsonl", diff --git a/samples/snippets/create_batch_prediction_job_video_action_recognition_sample_test.py b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample_test.py index 180a259f6f..d2295cd0c8 100644 --- a/samples/snippets/create_batch_prediction_job_video_action_recognition_sample_test.py +++ b/samples/snippets/create_batch_prediction_job_video_action_recognition_sample_test.py @@ -45,7 +45,7 @@ def test_create_batch_prediction_job_video_action_recognition_sample( create_batch_prediction_job_video_action_recognition_sample.create_batch_prediction_job_video_action_recognition_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, - model=model_name, + model_name=model_name, gcs_source_uri=GCS_SOURCE_URI, gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, ) diff --git a/samples/snippets/create_batch_prediction_job_video_classification_sample.py b/samples/snippets/create_batch_prediction_job_video_classification_sample.py index 96d8c89503..7da19784a2 100644 --- a/samples/snippets/create_batch_prediction_job_video_classification_sample.py +++ b/samples/snippets/create_batch_prediction_job_video_classification_sample.py @@ -27,6 +27,7 @@ def create_batch_prediction_job_video_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services 
require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample.py b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample.py index 2ccbfc9087..31ab011701 100644 --- a/samples/snippets/create_batch_prediction_job_video_object_tracking_sample.py +++ b/samples/snippets/create_batch_prediction_job_video_object_tracking_sample.py @@ -27,6 +27,7 @@ def create_batch_prediction_job_video_object_tracking_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_custom_job_sample.py b/samples/snippets/create_custom_job_sample.py index 1493a22ae3..e15ecce26b 100644 --- a/samples/snippets/create_custom_job_sample.py +++ b/samples/snippets/create_custom_job_sample.py @@ -23,6 +23,7 @@ def create_custom_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_data_labeling_job_active_learning_sample.py b/samples/snippets/create_data_labeling_job_active_learning_sample.py index 86360b7b34..334ca02de6 100644 --- a/samples/snippets/create_data_labeling_job_active_learning_sample.py +++ b/samples/snippets/create_data_labeling_job_active_learning_sample.py @@ -28,6 +28,7 @@ def create_data_labeling_job_active_learning_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_data_labeling_job_image_segmentation_sample.py b/samples/snippets/create_data_labeling_job_image_segmentation_sample.py index 94a80b9dd8..c9303a2b2d 100644 --- a/samples/snippets/create_data_labeling_job_image_segmentation_sample.py +++ b/samples/snippets/create_data_labeling_job_image_segmentation_sample.py @@ -29,6 +29,7 @@ def create_data_labeling_job_image_segmentation_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_data_labeling_job_images_sample.py b/samples/snippets/create_data_labeling_job_images_sample.py index 396b0d1823..3bee0bb27c 100644 --- a/samples/snippets/create_data_labeling_job_images_sample.py +++ b/samples/snippets/create_data_labeling_job_images_sample.py @@ -27,6 +27,7 @@ def create_data_labeling_job_images_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_data_labeling_job_sample.py b/samples/snippets/create_data_labeling_job_sample.py index 37dbde2073..0499a0d923 100644 --- a/samples/snippets/create_data_labeling_job_sample.py +++ b/samples/snippets/create_data_labeling_job_sample.py @@ -28,6 +28,7 @@ def create_data_labeling_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_data_labeling_job_specialist_pool_sample.py b/samples/snippets/create_data_labeling_job_specialist_pool_sample.py index 5cbded1fea..306113e584 100644 --- a/samples/snippets/create_data_labeling_job_specialist_pool_sample.py +++ b/samples/snippets/create_data_labeling_job_specialist_pool_sample.py @@ -29,6 +29,7 @@ def create_data_labeling_job_specialist_pool_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_data_labeling_job_video_sample.py b/samples/snippets/create_data_labeling_job_video_sample.py index 91edb975c7..5d4fd8d90a 100644 --- a/samples/snippets/create_data_labeling_job_video_sample.py +++ b/samples/snippets/create_data_labeling_job_video_sample.py @@ -27,6 +27,7 @@ def create_data_labeling_job_video_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_dataset_image_sample.py b/samples/snippets/create_dataset_image_sample.py index 513b8a7f83..ee1e0f500d 100644 --- a/samples/snippets/create_dataset_image_sample.py +++ b/samples/snippets/create_dataset_image_sample.py @@ -23,6 +23,7 @@ def create_dataset_image_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_dataset_sample.py b/samples/snippets/create_dataset_sample.py index 97535ca4ea..c4ff505b32 100644 --- a/samples/snippets/create_dataset_sample.py +++ b/samples/snippets/create_dataset_sample.py @@ -24,6 +24,7 @@ def create_dataset_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_dataset_tabular_bigquery_sample.py b/samples/snippets/create_dataset_tabular_bigquery_sample.py index 821eb00b68..3c1b2bb869 100644 --- a/samples/snippets/create_dataset_tabular_bigquery_sample.py +++ b/samples/snippets/create_dataset_tabular_bigquery_sample.py @@ -26,6 +26,7 @@ def create_dataset_tabular_bigquery_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_dataset_tabular_gcs_sample.py b/samples/snippets/create_dataset_tabular_gcs_sample.py index f16aac2c24..071439a24e 100644 --- a/samples/snippets/create_dataset_tabular_gcs_sample.py +++ b/samples/snippets/create_dataset_tabular_gcs_sample.py @@ -26,6 +26,7 @@ def create_dataset_tabular_gcs_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_dataset_text_sample.py b/samples/snippets/create_dataset_text_sample.py index b83f7cfa34..b8df907f18 100644 --- a/samples/snippets/create_dataset_text_sample.py +++ b/samples/snippets/create_dataset_text_sample.py @@ -23,6 +23,7 @@ def create_dataset_text_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_dataset_video_sample.py b/samples/snippets/create_dataset_video_sample.py index af363bde8e..fe65dcbe60 100644 --- a/samples/snippets/create_dataset_video_sample.py +++ b/samples/snippets/create_dataset_video_sample.py @@ -23,6 +23,7 @@ def create_dataset_video_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_endpoint_sample.py b/samples/snippets/create_endpoint_sample.py index 82c5ab1eb2..c88a17d64d 100644 --- a/samples/snippets/create_endpoint_sample.py +++ b/samples/snippets/create_endpoint_sample.py @@ -23,6 +23,7 @@ def create_endpoint_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py index 8ffe7cd9b8..41e8606178 100644 --- a/samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py +++ b/samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py @@ -25,6 +25,7 @@ def create_hyperparameter_tuning_job_python_package_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_hyperparameter_tuning_job_sample.py b/samples/snippets/create_hyperparameter_tuning_job_sample.py index 38b1b720f9..b1b99144d9 100644 --- a/samples/snippets/create_hyperparameter_tuning_job_sample.py +++ b/samples/snippets/create_hyperparameter_tuning_job_sample.py @@ -23,6 +23,7 @@ def create_hyperparameter_tuning_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_custom_job_sample.py b/samples/snippets/create_training_pipeline_custom_job_sample.py index e12a98b9ea..9ffbd106a2 100644 --- a/samples/snippets/create_training_pipeline_custom_job_sample.py +++ b/samples/snippets/create_training_pipeline_custom_job_sample.py @@ -27,6 +27,7 @@ def create_training_pipeline_custom_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py index b8966697a1..0aaad980dc 100644 --- a/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py +++ b/samples/snippets/create_training_pipeline_custom_training_managed_dataset_sample.py @@ -30,6 +30,7 @@ def create_training_pipeline_custom_training_managed_dataset_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_image_classification_sample.py b/samples/snippets/create_training_pipeline_image_classification_sample.py index 8542a6b843..28b407927e 100644 --- a/samples/snippets/create_training_pipeline_image_classification_sample.py +++ b/samples/snippets/create_training_pipeline_image_classification_sample.py @@ -26,6 +26,7 @@ def create_training_pipeline_image_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_training_pipeline_image_object_detection_sample.py b/samples/snippets/create_training_pipeline_image_object_detection_sample.py index 2e6ca494f9..9337e82c0d 100644 --- a/samples/snippets/create_training_pipeline_image_object_detection_sample.py +++ b/samples/snippets/create_training_pipeline_image_object_detection_sample.py @@ -26,6 +26,7 @@ def create_training_pipeline_image_object_detection_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_sample.py b/samples/snippets/create_training_pipeline_sample.py index eddcf7b775..1c6c695906 100644 --- a/samples/snippets/create_training_pipeline_sample.py +++ b/samples/snippets/create_training_pipeline_sample.py @@ -27,6 +27,7 @@ def create_training_pipeline_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_training_pipeline_tabular_classification_sample.py b/samples/snippets/create_training_pipeline_tabular_classification_sample.py index 717e499e5a..7255f5726a 100644 --- a/samples/snippets/create_training_pipeline_tabular_classification_sample.py +++ b/samples/snippets/create_training_pipeline_tabular_classification_sample.py @@ -27,6 +27,7 @@ def create_training_pipeline_tabular_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_tabular_regression_sample.py b/samples/snippets/create_training_pipeline_tabular_regression_sample.py index d2583bc706..02607c2788 100644 --- a/samples/snippets/create_training_pipeline_tabular_regression_sample.py +++ b/samples/snippets/create_training_pipeline_tabular_regression_sample.py @@ -27,6 +27,7 @@ def create_training_pipeline_tabular_regression_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_training_pipeline_text_classification_sample.py b/samples/snippets/create_training_pipeline_text_classification_sample.py index bc58db2b15..f18579d659 100644 --- a/samples/snippets/create_training_pipeline_text_classification_sample.py +++ b/samples/snippets/create_training_pipeline_text_classification_sample.py @@ -26,6 +26,7 @@ def create_training_pipeline_text_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py index 3140ef76ec..10ee43dc64 100644 --- a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py +++ b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py @@ -26,6 +26,7 @@ def create_training_pipeline_text_entity_extraction_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py index 1908828e4b..4ac221fe5d 100644 --- a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py +++ b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py @@ -26,6 +26,7 @@ def create_training_pipeline_text_sentiment_analysis_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py index aff9f5059b..facc8c1afc 100644 --- a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py +++ b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py @@ -27,6 +27,7 @@ def create_training_pipeline_video_action_recognition_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/create_training_pipeline_video_classification_sample.py b/samples/snippets/create_training_pipeline_video_classification_sample.py index 6d427d8b78..6ff8c03ac6 100644 --- a/samples/snippets/create_training_pipeline_video_classification_sample.py +++ b/samples/snippets/create_training_pipeline_video_classification_sample.py @@ -26,6 +26,7 @@ def create_training_pipeline_video_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py index 03d3bbeb11..52cfb4714a 100644 --- a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py +++ b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py @@ -26,6 +26,7 @@ def create_training_pipeline_video_object_tracking_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_batch_prediction_job_sample.py b/samples/snippets/delete_batch_prediction_job_sample.py index 51121d2c5e..f94c8f8c47 100644 --- a/samples/snippets/delete_batch_prediction_job_sample.py +++ b/samples/snippets/delete_batch_prediction_job_sample.py @@ -23,6 +23,7 @@ def delete_batch_prediction_job_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_custom_job_sample.py b/samples/snippets/delete_custom_job_sample.py index a5bb0f18fe..27071eae05 100644 --- a/samples/snippets/delete_custom_job_sample.py +++ b/samples/snippets/delete_custom_job_sample.py @@ -23,6 +23,7 @@ def delete_custom_job_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_data_labeling_job_sample.py b/samples/snippets/delete_data_labeling_job_sample.py index 4917886210..504f6362cc 100644 --- a/samples/snippets/delete_data_labeling_job_sample.py +++ b/samples/snippets/delete_data_labeling_job_sample.py @@ -23,6 +23,7 @@ def delete_data_labeling_job_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_dataset_sample.py b/samples/snippets/delete_dataset_sample.py index 722a49a5cf..482ca98266 100644 --- a/samples/snippets/delete_dataset_sample.py +++ b/samples/snippets/delete_dataset_sample.py @@ -23,6 +23,7 @@ def delete_dataset_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_endpoint_sample.py b/samples/snippets/delete_endpoint_sample.py index b0ce2589e1..783890fb97 100644 --- a/samples/snippets/delete_endpoint_sample.py +++ b/samples/snippets/delete_endpoint_sample.py @@ -23,6 +23,7 @@ def delete_endpoint_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_hyperparameter_tuning_job_sample.py b/samples/snippets/delete_hyperparameter_tuning_job_sample.py index 4bbd651227..9d981f0f79 100644 --- a/samples/snippets/delete_hyperparameter_tuning_job_sample.py +++ b/samples/snippets/delete_hyperparameter_tuning_job_sample.py @@ -23,6 +23,7 @@ def delete_hyperparameter_tuning_job_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_model_sample.py b/samples/snippets/delete_model_sample.py index 7ecbe681db..8142c445df 100644 --- a/samples/snippets/delete_model_sample.py +++ b/samples/snippets/delete_model_sample.py @@ -23,6 +23,7 @@ def delete_model_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/delete_training_pipeline_sample.py b/samples/snippets/delete_training_pipeline_sample.py index 08dfec20a4..79ea9cec86 100644 --- a/samples/snippets/delete_training_pipeline_sample.py +++ b/samples/snippets/delete_training_pipeline_sample.py @@ -23,6 +23,7 @@ def delete_training_pipeline_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/deploy_model_custom_trained_model_sample.py b/samples/snippets/deploy_model_custom_trained_model_sample.py index 439bdc802e..bd1b199d34 100644 --- a/samples/snippets/deploy_model_custom_trained_model_sample.py +++ b/samples/snippets/deploy_model_custom_trained_model_sample.py @@ -25,6 +25,7 @@ def deploy_model_custom_trained_model_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 7200, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/deploy_model_sample.py b/samples/snippets/deploy_model_sample.py index 1cb8bef552..c14e3c64df 100644 --- a/samples/snippets/deploy_model_sample.py +++ b/samples/snippets/deploy_model_sample.py @@ -25,6 +25,7 @@ def deploy_model_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 7200, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/explain_tabular_sample.py b/samples/snippets/explain_tabular_sample.py index c0997d77b6..cde1ec941e 100644 --- a/samples/snippets/explain_tabular_sample.py +++ b/samples/snippets/explain_tabular_sample.py @@ -27,6 +27,7 @@ def explain_tabular_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/export_model_sample.py b/samples/snippets/export_model_sample.py index ec083c778e..42d8369909 100644 --- a/samples/snippets/export_model_sample.py +++ b/samples/snippets/export_model_sample.py @@ -24,6 +24,7 @@ def export_model_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/export_model_tabular_classification_sample.py b/samples/snippets/export_model_tabular_classification_sample.py index 5d1c0d1455..5da02ef5b8 100644 --- a/samples/snippets/export_model_tabular_classification_sample.py +++ b/samples/snippets/export_model_tabular_classification_sample.py @@ -24,6 +24,7 @@ def export_model_tabular_classification_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/export_model_video_action_recognition_sample.py b/samples/snippets/export_model_video_action_recognition_sample.py index 570f82fba5..930a882c0a 100644 --- a/samples/snippets/export_model_video_action_recognition_sample.py +++ b/samples/snippets/export_model_video_action_recognition_sample.py @@ -25,6 +25,7 @@ def export_model_video_action_recognition_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_batch_prediction_job_sample.py b/samples/snippets/get_batch_prediction_job_sample.py index 7e840b4a65..dd4e51363e 100644 --- a/samples/snippets/get_batch_prediction_job_sample.py +++ b/samples/snippets/get_batch_prediction_job_sample.py @@ -22,6 +22,7 @@ def get_batch_prediction_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_custom_job_sample.py b/samples/snippets/get_custom_job_sample.py index 4fcce9aa16..261a536e54 100644 --- a/samples/snippets/get_custom_job_sample.py +++ b/samples/snippets/get_custom_job_sample.py @@ -22,6 +22,7 @@ def get_custom_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. 
# This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_hyperparameter_tuning_job_sample.py b/samples/snippets/get_hyperparameter_tuning_job_sample.py index a9378533a1..843e284c08 100644 --- a/samples/snippets/get_hyperparameter_tuning_job_sample.py +++ b/samples/snippets/get_hyperparameter_tuning_job_sample.py @@ -22,6 +22,7 @@ def get_hyperparameter_tuning_job_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_image_classification_sample.py b/samples/snippets/get_model_evaluation_image_classification_sample.py index 8271f8ee69..9fe35aa60e 100644 --- a/samples/snippets/get_model_evaluation_image_classification_sample.py +++ b/samples/snippets/get_model_evaluation_image_classification_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_image_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/get_model_evaluation_image_object_detection_sample.py b/samples/snippets/get_model_evaluation_image_object_detection_sample.py index fb7a4f06b3..d6a4c5cc9a 100644 --- a/samples/snippets/get_model_evaluation_image_object_detection_sample.py +++ b/samples/snippets/get_model_evaluation_image_object_detection_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_image_object_detection_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_sample.py b/samples/snippets/get_model_evaluation_sample.py index 8616e15e62..ce1c8d4a2d 100644 --- a/samples/snippets/get_model_evaluation_sample.py +++ b/samples/snippets/get_model_evaluation_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_slice_sample.py b/samples/snippets/get_model_evaluation_slice_sample.py index f1d0f2f850..e79e787c7d 100644 --- a/samples/snippets/get_model_evaluation_slice_sample.py +++ b/samples/snippets/get_model_evaluation_slice_sample.py @@ -24,6 +24,7 @@ def get_model_evaluation_slice_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_tabular_classification_sample.py b/samples/snippets/get_model_evaluation_tabular_classification_sample.py index ade0345ac5..2dd870f89f 100644 --- a/samples/snippets/get_model_evaluation_tabular_classification_sample.py +++ b/samples/snippets/get_model_evaluation_tabular_classification_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_tabular_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_tabular_regression_sample.py b/samples/snippets/get_model_evaluation_tabular_regression_sample.py index 65d717fa3a..b08bcd5a45 100644 --- a/samples/snippets/get_model_evaluation_tabular_regression_sample.py +++ b/samples/snippets/get_model_evaluation_tabular_regression_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_tabular_regression_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/get_model_evaluation_text_classification_sample.py b/samples/snippets/get_model_evaluation_text_classification_sample.py index 4fb60d4e18..cdaea5c988 100644 --- a/samples/snippets/get_model_evaluation_text_classification_sample.py +++ b/samples/snippets/get_model_evaluation_text_classification_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_text_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_text_entity_extraction_sample.py b/samples/snippets/get_model_evaluation_text_entity_extraction_sample.py index 4f93953399..1fff1fa767 100644 --- a/samples/snippets/get_model_evaluation_text_entity_extraction_sample.py +++ b/samples/snippets/get_model_evaluation_text_entity_extraction_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_text_entity_extraction_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/get_model_evaluation_text_sentiment_analysis_sample.py b/samples/snippets/get_model_evaluation_text_sentiment_analysis_sample.py index 977d755114..bb8521c1b1 100644 --- a/samples/snippets/get_model_evaluation_text_sentiment_analysis_sample.py +++ b/samples/snippets/get_model_evaluation_text_sentiment_analysis_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_text_sentiment_analysis_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_video_action_recognition_sample.py b/samples/snippets/get_model_evaluation_video_action_recognition_sample.py index 10fde4d286..3233b45677 100644 --- a/samples/snippets/get_model_evaluation_video_action_recognition_sample.py +++ b/samples/snippets/get_model_evaluation_video_action_recognition_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_video_action_recognition_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/get_model_evaluation_video_classification_sample.py b/samples/snippets/get_model_evaluation_video_classification_sample.py index 6a0f5c8143..3f0eb65566 100644 --- a/samples/snippets/get_model_evaluation_video_classification_sample.py +++ b/samples/snippets/get_model_evaluation_video_classification_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_video_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_evaluation_video_object_tracking_sample.py b/samples/snippets/get_model_evaluation_video_object_tracking_sample.py index 4fd4b62958..493b3730dd 100644 --- a/samples/snippets/get_model_evaluation_video_object_tracking_sample.py +++ b/samples/snippets/get_model_evaluation_video_object_tracking_sample.py @@ -23,6 +23,7 @@ def get_model_evaluation_video_object_tracking_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_model_sample.py b/samples/snippets/get_model_sample.py index 6d56f90304..0524e8ce9d 100644 --- a/samples/snippets/get_model_sample.py +++ b/samples/snippets/get_model_sample.py @@ -22,6 +22,7 @@ def get_model_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/get_training_pipeline_sample.py b/samples/snippets/get_training_pipeline_sample.py index 52317e1e95..19bab96cfb 100644 --- a/samples/snippets/get_training_pipeline_sample.py +++ b/samples/snippets/get_training_pipeline_sample.py @@ -22,6 +22,7 @@ def get_training_pipeline_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/import_data_image_classification_single_label_sample.py b/samples/snippets/import_data_image_classification_single_label_sample.py index d4289820d5..eec6efca4e 100644 --- a/samples/snippets/import_data_image_classification_single_label_sample.py +++ b/samples/snippets/import_data_image_classification_single_label_sample.py @@ -24,6 +24,7 @@ def import_data_image_classification_single_label_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/import_data_image_object_detection_sample.py b/samples/snippets/import_data_image_object_detection_sample.py index ce6297268b..8ffc4b2e5a 100644 --- a/samples/snippets/import_data_image_object_detection_sample.py +++ b/samples/snippets/import_data_image_object_detection_sample.py @@ -24,6 +24,7 @@ def import_data_image_object_detection_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/import_data_sample.py b/samples/snippets/import_data_sample.py index f712bc59ad..0e3581045d 100644 --- a/samples/snippets/import_data_sample.py +++ b/samples/snippets/import_data_sample.py @@ -25,6 +25,7 @@ def import_data_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/import_data_text_classification_single_label_sample.py b/samples/snippets/import_data_text_classification_single_label_sample.py index 0579531271..a1fdea9563 100644 --- a/samples/snippets/import_data_text_classification_single_label_sample.py +++ b/samples/snippets/import_data_text_classification_single_label_sample.py @@ -24,6 +24,7 @@ def import_data_text_classification_single_label_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/import_data_text_entity_extraction_sample.py b/samples/snippets/import_data_text_entity_extraction_sample.py index f322f918b1..ad7958e3ac 100644 --- a/samples/snippets/import_data_text_entity_extraction_sample.py +++ b/samples/snippets/import_data_text_entity_extraction_sample.py @@ -24,6 +24,7 @@ def import_data_text_entity_extraction_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/import_data_text_sentiment_analysis_sample.py b/samples/snippets/import_data_text_sentiment_analysis_sample.py index 2d9ee850fb..6a42e9f5ef 100644 --- a/samples/snippets/import_data_text_sentiment_analysis_sample.py +++ b/samples/snippets/import_data_text_sentiment_analysis_sample.py @@ -24,6 +24,7 @@ def import_data_text_sentiment_analysis_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/import_data_video_action_recognition_sample.py b/samples/snippets/import_data_video_action_recognition_sample.py index ccc4ec1de9..65d3b50f42 100644 --- a/samples/snippets/import_data_video_action_recognition_sample.py +++ b/samples/snippets/import_data_video_action_recognition_sample.py @@ -24,6 +24,7 @@ def import_data_video_action_recognition_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/import_data_video_classification_sample.py b/samples/snippets/import_data_video_classification_sample.py index 6162d6b662..714bd5e5a2 100644 --- a/samples/snippets/import_data_video_classification_sample.py +++ b/samples/snippets/import_data_video_classification_sample.py @@ -24,6 +24,7 @@ def import_data_video_classification_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/import_data_video_object_tracking_sample.py b/samples/snippets/import_data_video_object_tracking_sample.py index 65dd09bd9e..7aa254d042 100644 --- a/samples/snippets/import_data_video_object_tracking_sample.py +++ b/samples/snippets/import_data_video_object_tracking_sample.py @@ -24,6 +24,7 @@ def import_data_video_object_tracking_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. 
client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/list_model_evaluation_slices_sample.py b/samples/snippets/list_model_evaluation_slices_sample.py index 5ae1f2212d..8285999aca 100644 --- a/samples/snippets/list_model_evaluation_slices_sample.py +++ b/samples/snippets/list_model_evaluation_slices_sample.py @@ -23,6 +23,7 @@ def list_model_evaluation_slices_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/predict_custom_trained_model_sample.py b/samples/snippets/predict_custom_trained_model_sample.py index 46fd81f03b..3db7e52dd4 100644 --- a/samples/snippets/predict_custom_trained_model_sample.py +++ b/samples/snippets/predict_custom_trained_model_sample.py @@ -27,6 +27,7 @@ def predict_custom_trained_model_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/predict_image_classification_sample.py b/samples/snippets/predict_image_classification_sample.py index cee06ac5c3..b07a7b9669 100644 --- a/samples/snippets/predict_image_classification_sample.py +++ b/samples/snippets/predict_image_classification_sample.py @@ -27,6 +27,7 @@ def predict_image_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/predict_image_object_detection_sample.py b/samples/snippets/predict_image_object_detection_sample.py index 6fb7e065f8..82561581b3 100644 --- a/samples/snippets/predict_image_object_detection_sample.py +++ b/samples/snippets/predict_image_object_detection_sample.py @@ -27,6 +27,7 @@ def predict_image_object_detection_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/predict_sample.py b/samples/snippets/predict_sample.py index 756077e25e..9ac48f271f 100644 --- a/samples/snippets/predict_sample.py +++ b/samples/snippets/predict_sample.py @@ -28,6 +28,7 @@ def predict_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. 
# This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/predict_tabular_classification_sample.py b/samples/snippets/predict_tabular_classification_sample.py index 31533a0b79..9b8182ff35 100644 --- a/samples/snippets/predict_tabular_classification_sample.py +++ b/samples/snippets/predict_tabular_classification_sample.py @@ -27,6 +27,7 @@ def predict_tabular_classification_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/predict_tabular_regression_sample.py b/samples/snippets/predict_tabular_regression_sample.py index 475b02432b..8c564b2b83 100644 --- a/samples/snippets/predict_tabular_regression_sample.py +++ b/samples/snippets/predict_tabular_regression_sample.py @@ -27,6 +27,7 @@ def predict_tabular_regression_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/predict_text_classification_single_label_sample.py b/samples/snippets/predict_text_classification_single_label_sample.py index b0f5bac9fb..3cd9e74e6f 100644 --- a/samples/snippets/predict_text_classification_single_label_sample.py +++ b/samples/snippets/predict_text_classification_single_label_sample.py @@ -25,6 +25,7 @@ def predict_text_classification_single_label_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/predict_text_entity_extraction_sample.py b/samples/snippets/predict_text_entity_extraction_sample.py index a78566ea06..ae7b352af7 100644 --- a/samples/snippets/predict_text_entity_extraction_sample.py +++ b/samples/snippets/predict_text_entity_extraction_sample.py @@ -25,6 +25,7 @@ def predict_text_entity_extraction_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/predict_text_sentiment_analysis_sample.py b/samples/snippets/predict_text_sentiment_analysis_sample.py index d899a263fd..2aac58d984 100644 --- a/samples/snippets/predict_text_sentiment_analysis_sample.py +++ b/samples/snippets/predict_text_sentiment_analysis_sample.py @@ -25,6 +25,7 @@ def predict_text_sentiment_analysis_sample( location: str = "us-central1", api_endpoint: str = "us-central1-prediction-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/upload_model_explain_image_managed_container_sample.py b/samples/snippets/upload_model_explain_image_managed_container_sample.py index 0b5f46533a..8fddfc5a31 100644 --- a/samples/snippets/upload_model_explain_image_managed_container_sample.py +++ b/samples/snippets/upload_model_explain_image_managed_container_sample.py @@ -27,6 +27,7 @@ def upload_model_explain_image_managed_container_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/upload_model_explain_tabular_managed_container_sample.py b/samples/snippets/upload_model_explain_tabular_managed_container_sample.py index 5d2d56a868..5449987bd8 100644 --- a/samples/snippets/upload_model_explain_tabular_managed_container_sample.py +++ b/samples/snippets/upload_model_explain_tabular_managed_container_sample.py @@ -28,6 +28,7 @@ def upload_model_explain_tabular_managed_container_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 300, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. diff --git a/samples/snippets/upload_model_sample.py b/samples/snippets/upload_model_sample.py index 9bb099ec17..924514d919 100644 --- a/samples/snippets/upload_model_sample.py +++ b/samples/snippets/upload_model_sample.py @@ -26,6 +26,7 @@ def upload_model_sample( api_endpoint: str = "us-central1-aiplatform.googleapis.com", timeout: int = 1800, ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
From 1a302d232d868a96bf6a41cbf92a550edcdb0673 Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Thu, 17 Dec 2020 09:20:45 -0800 Subject: [PATCH 25/34] feat: adds function/method enhancements, demo samples (#122) * feat: adds function/method enhancements --- .github/CODEOWNERS | 3 + google/cloud/aiplatform/helpers/__init__.py | 3 + .../cloud/aiplatform/helpers/_decorators.py | 70 +++++++++++++++ .../aiplatform/helpers/value_converter.py | 60 +++++++++++++ .../schema/predict/instance/__init__.py | 3 + .../v1beta1/schema/predict/params/__init__.py | 3 + .../schema/predict/prediction/__init__.py | 3 + .../types/text_sentiment.py | 10 +-- .../schema/trainingjob/definition/__init__.py | 3 + .../types/automl_forecasting.py | 14 +-- .../definition_v1beta1/types/automl_tables.py | 10 +-- ...ng_pipeline_image_classification_sample.py | 19 ++-- .../predict_image_classification_sample.py | 29 ++++--- ...redict_image_classification_sample_test.py | 2 +- synth.py | 15 ++++ .../enhanced_library/test_enhanced_types.py | 36 ++++++++ .../enhanced_library/test_value_converter.py | 87 +++++++++++++++++++ 17 files changed, 331 insertions(+), 39 deletions(-) create mode 100644 google/cloud/aiplatform/helpers/__init__.py create mode 100644 google/cloud/aiplatform/helpers/_decorators.py create mode 100644 google/cloud/aiplatform/helpers/value_converter.py create mode 100644 tests/unit/enhanced_library/test_enhanced_types.py create mode 100644 tests/unit/enhanced_library/test_value_converter.py diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 290aabe045..bb1666f917 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,3 +9,6 @@ # The python-samples-owners team is the default owner for samples /samples/**/*.py @dizcology @googleapis/python-samples-owners + +# The enhanced client library tests are owned by @telpirion +/tests/unit/enhanced_library/*.py @telpirion \ No newline at end of file diff --git a/google/cloud/aiplatform/helpers/__init__.py 
b/google/cloud/aiplatform/helpers/__init__.py new file mode 100644 index 0000000000..3f031f2bb4 --- /dev/null +++ b/google/cloud/aiplatform/helpers/__init__.py @@ -0,0 +1,3 @@ +from google.cloud.aiplatform.helpers import value_converter + +__all__ = (value_converter,) diff --git a/google/cloud/aiplatform/helpers/_decorators.py b/google/cloud/aiplatform/helpers/_decorators.py new file mode 100644 index 0000000000..5d9aa28bea --- /dev/null +++ b/google/cloud/aiplatform/helpers/_decorators.py @@ -0,0 +1,70 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import +from google.cloud.aiplatform.helpers import value_converter + +from proto.marshal import Marshal +from proto.marshal.rules.struct import ValueRule +from google.protobuf.struct_pb2 import Value + + +class ConversionValueRule(ValueRule): + def to_python(self, value, *, absent: bool = None): + return super().to_python(value, absent=absent) + + def to_proto(self, value): + + # Need to check whether value is an instance + # of an enhanced type + if callable(getattr(value, "to_value", None)): + return value.to_value() + else: + return super().to_proto(value) + + +def _add_methods_to_classes_in_package(pkg): + classes = dict( + [(name, cls) for name, cls in pkg.__dict__.items() if isinstance(cls, type)] + ) + + for class_name, cls in classes.items(): + # Add to_value() method to class with docstring + setattr(cls, "to_value", value_converter.to_value) + cls.to_value.__doc__ = value_converter.to_value.__doc__ + + # Add from_value() method to class with docstring + setattr(cls, "from_value", _add_from_value_to_class(cls)) + cls.from_value.__doc__ = value_converter.from_value.__doc__ + + # Add from_map() method to class with docstring + setattr(cls, "from_map", _add_from_map_to_class(cls)) + cls.from_map.__doc__ = value_converter.from_map.__doc__ + + +def _add_from_value_to_class(cls): + def _from_value(value): + return value_converter.from_value(cls, value) + + return _from_value + + +def _add_from_map_to_class(cls): + def _from_map(map_): + return value_converter.from_map(cls, map_) + + return _from_map + + +marshal = Marshal(name="google.cloud.aiplatform.v1beta1") +marshal.register(Value, ConversionValueRule(marshal=marshal)) diff --git a/google/cloud/aiplatform/helpers/value_converter.py b/google/cloud/aiplatform/helpers/value_converter.py new file mode 100644 index 0000000000..99d56d8b6c --- /dev/null +++ b/google/cloud/aiplatform/helpers/value_converter.py @@ -0,0 +1,60 @@ +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from google.protobuf.struct_pb2 import Value +from google.protobuf import json_format +from proto.marshal.collections.maps import MapComposite +from proto.marshal import Marshal +from proto import Message +from proto.message import MessageMeta + + +def to_value(self: Message) -> Value: + """Converts a message type to a :class:`~google.protobuf.struct_pb2.Value` object. + + Args: + message: the message to convert + + Returns: + the message as a :class:`~google.protobuf.struct_pb2.Value` object + """ + tmp_dict = json_format.MessageToDict(self._pb) + return json_format.ParseDict(tmp_dict, Value()) + + +def from_value(cls: MessageMeta, value: Value) -> Message: + """Creates instance of class from a :class:`~google.protobuf.struct_pb2.Value` object. + + Args: + value: a :class:`~google.protobuf.struct_pb2.Value` object + + Returns: + Instance of class + """ + value_dict = json_format.MessageToDict(value) + return json_format.ParseDict(value_dict, cls()._pb) + + +def from_map(cls: MessageMeta, map_: MapComposite) -> Message: + """Creates instance of class from a :class:`~proto.marshal.collections.maps.MapComposite` object. 
+ + Args: + map_: a :class:`~proto.marshal.collections.maps.MapComposite` object + + Returns: + Instance of class + """ + marshal = Marshal(name="marshal") + pb = marshal.to_proto(Value, map_) + return from_value(cls, pb) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py index 2f514ac4ed..095807df0c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.cloud.aiplatform.helpers import _decorators +import google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types as pkg from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ( ImageClassificationPredictionInstance, @@ -54,3 +56,4 @@ "VideoClassificationPredictionInstance", "VideoObjectTrackingPredictionInstance", ) +_decorators._add_methods_to_classes_in_package(pkg) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py index dc7cd58e9a..30a25cc3c8 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from google.cloud.aiplatform.helpers import _decorators +import google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types as pkg from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ( ImageClassificationPredictionParams, @@ -42,3 +44,4 @@ "VideoClassificationPredictionParams", "VideoObjectTrackingPredictionParams", ) +_decorators._add_methods_to_classes_in_package(pkg) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py index 4447d3770a..50966a087f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.cloud.aiplatform.helpers import _decorators +import google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types as pkg from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ( ClassificationPredictionResult, @@ -62,3 +64,4 @@ "VideoClassificationPredictionResult", "VideoObjectTrackingPredictionResult", ) +_decorators._add_methods_to_classes_in_package(pkg) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py index 192e50419d..39ef21bf21 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -17,8 +17,10 @@ import proto # type: ignore - -from google.cloud.aiplatform.v1beta1.schema.predict.instance import text_sentiment_pb2 as gcaspi_text_sentiment # type: ignore +# DO NOT OVERWRITE FOLLOWING LINE: it was manually edited. 
+from google.cloud.aiplatform.v1beta1.schema.predict.instance import ( + TextSentimentPredictionInstance, +) __protobuf__ = proto.module( @@ -57,9 +59,7 @@ class Prediction(proto.Message): sentiment = proto.Field(proto.INT32, number=1) instance = proto.Field( - proto.MESSAGE, - number=1, - message=gcaspi_text_sentiment.TextSentimentPredictionInstance, + proto.MESSAGE, number=1, message=TextSentimentPredictionInstance, ) prediction = proto.Field(proto.MESSAGE, number=2, message=Prediction,) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py index abd693172a..9ebfc71841 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.cloud.aiplatform.helpers import _decorators +import google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types as pkg from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( AutoMlForecasting, @@ -130,3 +132,4 @@ "AutoMlVideoObjectTrackingInputs", "ExportEvaluatedDataItemsConfig", ) +_decorators._add_methods_to_classes_in_package(pkg) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py index 40c549dc5f..710793c9a7 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py @@ -78,14 +78,14 @@ class AutoMlForecastingInputs(proto.Message): function over the validation set. 
The supported optimization objectives: - "minimize-rmse" (default) - Minimize root- + "minimize-rmse" (default) - Minimize root- mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize- rmsle" - Minimize root-mean-squared log error (RMSLE). "minimize-rmspe" - Minimize root- mean-squared percentage error (RMSPE). "minimize-wape-mae" - Minimize the combination - of weighted absolute percentage error (WAPE) + of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). train_budget_milli_node_hours (int): Required. The train budget of creating this @@ -418,11 +418,11 @@ class Period(proto.Message): unit (str): The time granularity unit of this time period. The supported unit are: - "hour" - "day" - "week" - "month" - "year". + "hour" + "day" + "week" + "month" + "year". quantity (int): The number of units per period, e.g. 3 weeks or 2 months. diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py index 55d620b32e..f924979bd6 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -61,7 +61,7 @@ class AutoMlTablesInputs(proto.Message): produce. "classification" - Predict one out of multiple target values is picked for each row. - "regression" - Predict a value based on its + "regression" - Predict a value based on its relation to other values. This type is available only to columns that contain semantically numeric values, i.e. integers or @@ -87,11 +87,11 @@ class AutoMlTablesInputs(proto.Message): the prediction type. If the field is not set, a default objective function is used. 
classification (binary): - "maximize-au-roc" (default) - Maximize the + "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC) curve. "minimize-log-loss" - Minimize log loss. - "maximize-au-prc" - Maximize the area under + "maximize-au-prc" - Maximize the area under the precision-recall curve. "maximize- precision-at-recall" - Maximize precision for a specified @@ -99,10 +99,10 @@ class AutoMlTablesInputs(proto.Message): Maximize recall for a specified precision value. classification (multi-class): - "minimize-log-loss" (default) - Minimize log + "minimize-log-loss" (default) - Minimize log loss. regression: - "minimize-rmse" (default) - Minimize root- + "minimize-rmse" (default) - Minimize root- mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize- rmsle" - Minimize root-mean-squared log error diff --git a/samples/snippets/create_training_pipeline_image_classification_sample.py b/samples/snippets/create_training_pipeline_image_classification_sample.py index 28b407927e..d97ccbca84 100644 --- a/samples/snippets/create_training_pipeline_image_classification_sample.py +++ b/samples/snippets/create_training_pipeline_image_classification_sample.py @@ -14,8 +14,8 @@ # [START aiplatform_create_training_pipeline_image_classification_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition +ModelType = definition.AutoMlImageClassificationInputs().ModelType def create_training_pipeline_image_classification_sample( @@ -31,13 +31,14 @@ def create_training_pipeline_image_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_dict = { - "multiLabel": True, - "modelType": "CLOUD", - "budgetMilliNodeHours": 8000, - "disableEarlyStopping": False, - } - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + + icn_training_inputs = definition.AutoMlImageClassificationInputs( + multi_label=True, + model_type=ModelType.CLOUD, + budget_milli_node_hours=8000, + disable_early_stopping=False + ) + training_task_inputs = icn_training_inputs.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/predict_image_classification_sample.py b/samples/snippets/predict_image_classification_sample.py index b07a7b9669..f0e31ff1dc 100644 --- a/samples/snippets/predict_image_classification_sample.py +++ b/samples/snippets/predict_image_classification_sample.py @@ -16,8 +16,9 @@ import base64 from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.v1beta1.schema.predict import instance +from google.cloud.aiplatform.v1beta1.schema.predict import params +from google.cloud.aiplatform.v1beta1.schema.predict import prediction def predict_image_classification_sample( @@ -37,25 +38,29 @@ def predict_image_classification_sample( # The format of each instance should conform to the deployed model's prediction input schema. encoded_content = base64.b64encode(file_content).decode("utf-8") - instance_dict = {"content": encoded_content} - instance = json_format.ParseDict(instance_dict, Value()) - instances = [instance] - # See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters. 
- parameters_dict = {"confidence_threshold": 0.5, "max_predictions": 5} - parameters = json_format.ParseDict(parameters_dict, Value()) + instance_obj = instance.ImageClassificationPredictionInstance( + content=encoded_content) + + instance_val = instance_obj.to_value() + instances = [instance_val] + + params_obj = params.ImageClassificationPredictionParams( + confidence_threshold=0.5, max_predictions=5) + endpoint = client.endpoint_path( project=project, location=location, endpoint=endpoint_id ) response = client.predict( - endpoint=endpoint, instances=instances, parameters=parameters + endpoint=endpoint, instances=instances, parameters=params_obj ) print("response") - print(" deployed_model_id:", response.deployed_model_id) + print("\tdeployed_model_id:", response.deployed_model_id) # See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml for the format of the predictions. predictions = response.predictions - for prediction in predictions: - print(" prediction:", dict(prediction)) + for prediction_ in predictions: + prediction_obj = prediction.ClassificationPredictionResult.from_map(prediction_) + print(prediction_obj) # [END aiplatform_predict_image_classification_sample] diff --git a/samples/snippets/predict_image_classification_sample_test.py b/samples/snippets/predict_image_classification_sample_test.py index 10e72bb386..f771af99a4 100644 --- a/samples/snippets/predict_image_classification_sample_test.py +++ b/samples/snippets/predict_image_classification_sample_test.py @@ -31,4 +31,4 @@ def test_ucaip_generated_predict_image_classification_sample(capsys): ) out, _ = capsys.readouterr() - assert 'string_value: "daisy"' in out + assert 'deployed_model_id:' in out diff --git a/synth.py b/synth.py index 107235edac..ee86460430 100644 --- a/synth.py +++ b/synth.py @@ -84,6 +84,21 @@ "request.traffic_split = traffic_split", ) + +# Generator adds a bad import statement to enhanced type; +# need to fix in post-processing steps. 
+s.replace( + "google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py", + "text_sentiment_pb2 as gcaspi_text_sentiment # type: ignore", + "TextSentimentPredictionInstance") + +s.replace( + "google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py", + "message=gcaspi_text_sentiment.TextSentimentPredictionInstance,", + "message=TextSentimentPredictionInstance,") + + + # post processing to fix the generated reference doc from synthtool import transforms as st import re diff --git a/tests/unit/enhanced_library/test_enhanced_types.py b/tests/unit/enhanced_library/test_enhanced_types.py new file mode 100644 index 0000000000..e0a3120909 --- /dev/null +++ b/tests/unit/enhanced_library/test_enhanced_types.py @@ -0,0 +1,36 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import + +from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition + +ModelType = definition.AutoMlImageClassificationInputs().ModelType +test_training_input = definition.AutoMlImageClassificationInputs( + multi_label=True, + model_type=ModelType.CLOUD, + budget_milli_node_hours=8000, + disable_early_stopping=False, +) + + +def test_exposes_to_value_method(): + assert hasattr(test_training_input, "to_value") + + +def test_exposes_from_value_method(): + assert hasattr(test_training_input, "from_value") + + +def test_exposes_from_map_method(): + assert hasattr(test_training_input, "from_map") diff --git a/tests/unit/enhanced_library/test_value_converter.py b/tests/unit/enhanced_library/test_value_converter.py new file mode 100644 index 0000000000..b39512611b --- /dev/null +++ b/tests/unit/enhanced_library/test_value_converter.py @@ -0,0 +1,87 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import + +from google.cloud.aiplatform.helpers import value_converter +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value +import proto + + +class SomeMessage(proto.Message): + test_str = proto.Field(proto.STRING, number=1) + test_int64 = proto.Field(proto.INT64, number=2) + test_bool = proto.Field(proto.BOOL, number=3) + + +class SomeInType(proto.Message): + test_map = proto.MapField(proto.STRING, proto.INT32, number=1) + + +class SomeOutType(proto.Message): + test_int = proto.Field(proto.INT32, number=1) + + +input_dict = { + "test_str": "Omnia Gallia est divisa", + "test_int64": 3, + "test_bool": True, +} +input_value = json_format.ParseDict(input_dict, Value()) +input_message = SomeMessage(input_dict) + + +def test_convert_message_to_value(): + actual_to_value_output = value_converter.to_value(input_message) + expected_type = Value() + assert isinstance(expected_type, type(actual_to_value_output)) + + actual_inner_fields = actual_to_value_output.struct_value.fields + + actual_bool_type = actual_inner_fields["test_bool"] + assert hasattr(actual_bool_type, "bool_value") + + actual_int64_type = actual_inner_fields["test_int64"] + assert hasattr(actual_int64_type, "number_value") + + actual_string_type = actual_inner_fields["test_str"] + assert hasattr(actual_string_type, "string_value") + + +def test_convert_value_to_message(): + actual_from_value_output = value_converter.from_value(SomeMessage, input_value) + expected_type = SomeMessage(input_dict) + + # TODO: compare instance of SomeMessage against + # actual_from_value_output. 
+ # See https://github.com/googleapis/python-aiplatform/issues/136 + + # Check property-level ("duck-typing") equivalency + assert actual_from_value_output.test_str == expected_type.test_str + assert actual_from_value_output.test_bool == expected_type.test_bool + assert actual_from_value_output.test_int64 == expected_type.test_int64 + + +def test_convert_map_to_message(): + message_with_map = SomeInType() + message_with_map.test_map["test_int"] = 42 + map_composite = message_with_map.test_map + actual_output = value_converter.from_map(SomeOutType, map_composite) + + # TODO: compare instance of SomeMessage against + # actual_from_value_output. + # See https://github.com/googleapis/python-aiplatform/issues/136 + + # Check property-to-key/value equivalency + assert actual_output.test_int == map_composite["test_int"] From 1cbd4a553fb8d035f687247ce87843167bf106ad Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Mon, 21 Dec 2020 11:50:25 -0800 Subject: [PATCH 26/34] feat: add schema namespace (#140) * feat: add schema namespace --- google/cloud/aiplatform/__init__.py | 4 ++- google/cloud/aiplatform/schema/__init__.py | 25 ++++++++++++++++++ .../aiplatform/v1beta1/schema/__init__.py | 25 ++++++++++++++++++ .../v1beta1/schema/predict/__init__.py | 26 +++++++++++++++++++ .../schema/predict/instance/__init__.py | 2 +- .../v1beta1/schema/predict/params/__init__.py | 2 +- .../schema/predict/prediction/__init__.py | 4 ++- .../v1beta1/schema/trainingjob/__init__.py | 20 ++++++++++++++ .../schema/trainingjob/definition/__init__.py | 4 ++- ...ng_pipeline_image_classification_sample.py | 7 +++-- .../predict_image_classification_sample.py | 10 +++---- 11 files changed, 114 insertions(+), 15 deletions(-) create mode 100644 google/cloud/aiplatform/schema/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/__init__.py create mode 100644 google/cloud/aiplatform/v1beta1/schema/predict/__init__.py create mode 100644 
google/cloud/aiplatform/v1beta1/schema/trainingjob/__init__.py diff --git a/google/cloud/aiplatform/__init__.py b/google/cloud/aiplatform/__init__.py index ec30029286..2a18bec4cd 100644 --- a/google/cloud/aiplatform/__init__.py +++ b/google/cloud/aiplatform/__init__.py @@ -16,5 +16,7 @@ # from google.cloud.aiplatform import gapic +from google.cloud.aiplatform import schema -__all__ = (gapic,) + +__all__ = (gapic, schema) diff --git a/google/cloud/aiplatform/schema/__init__.py b/google/cloud/aiplatform/schema/__init__.py new file mode 100644 index 0000000000..01fd26b30a --- /dev/null +++ b/google/cloud/aiplatform/schema/__init__.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from google.cloud.aiplatform.v1beta1.schema import predict +from google.cloud.aiplatform.v1beta1.schema import trainingjob + + +__all__ = ( + "predict", + "trainingjob", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/__init__.py b/google/cloud/aiplatform/v1beta1/schema/__init__.py new file mode 100644 index 0000000000..01fd26b30a --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/__init__.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from google.cloud.aiplatform.v1beta1.schema import predict +from google.cloud.aiplatform.v1beta1.schema import trainingjob + + +__all__ = ( + "predict", + "trainingjob", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/__init__.py new file mode 100644 index 0000000000..c016baf1d9 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/predict/__init__.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from google.cloud.aiplatform.v1beta1.schema.predict import instance +from google.cloud.aiplatform.v1beta1.schema.predict import params +from google.cloud.aiplatform.v1beta1.schema.predict import prediction + +__all__ = ( + "instance", + "params", + "prediction", +) diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py index 095807df0c..3c4e8af160 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -15,7 +15,7 @@ # limitations under the License. # from google.cloud.aiplatform.helpers import _decorators -import google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types as pkg +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import types as pkg from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ( ImageClassificationPredictionInstance, diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py index 30a25cc3c8..45471523c9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -15,7 +15,7 @@ # limitations under the License. 
# from google.cloud.aiplatform.helpers import _decorators -import google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types as pkg +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1 import types as pkg from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ( ImageClassificationPredictionParams, diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py index 50966a087f..f8a4d63d58 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -15,7 +15,9 @@ # limitations under the License. # from google.cloud.aiplatform.helpers import _decorators -import google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types as pkg +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1 import ( + types as pkg, +) from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ( ClassificationPredictionResult, diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/__init__.py new file mode 100644 index 0000000000..29d9c34600 --- /dev/null +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition + +__all__ = ("definition",) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py index 9ebfc71841..8c71f1f7cf 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -15,7 +15,9 @@ # limitations under the License. # from google.cloud.aiplatform.helpers import _decorators -import google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types as pkg +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1 import ( + types as pkg, +) from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import ( AutoMlForecasting, diff --git a/samples/snippets/create_training_pipeline_image_classification_sample.py b/samples/snippets/create_training_pipeline_image_classification_sample.py index d97ccbca84..9186e498d9 100644 --- a/samples/snippets/create_training_pipeline_image_classification_sample.py +++ b/samples/snippets/create_training_pipeline_image_classification_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_image_classification_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition -ModelType = definition.AutoMlImageClassificationInputs().ModelType +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_image_classification_sample( @@ -32,9 +31,9 @@ def create_training_pipeline_image_classification_sample( # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - icn_training_inputs = definition.AutoMlImageClassificationInputs( + icn_training_inputs = trainingjob.definition.AutoMlImageClassificationInputs( multi_label=True, - model_type=ModelType.CLOUD, + model_type=trainingjob.definition.AutoMlImageClassificationInputs.ModelType.CLOUD, budget_milli_node_hours=8000, disable_early_stopping=False ) diff --git a/samples/snippets/predict_image_classification_sample.py b/samples/snippets/predict_image_classification_sample.py index f0e31ff1dc..126c21664b 100644 --- a/samples/snippets/predict_image_classification_sample.py +++ b/samples/snippets/predict_image_classification_sample.py @@ -16,9 +16,7 @@ import base64 from google.cloud import aiplatform -from google.cloud.aiplatform.v1beta1.schema.predict import instance -from google.cloud.aiplatform.v1beta1.schema.predict import params -from google.cloud.aiplatform.v1beta1.schema.predict import prediction +from google.cloud.aiplatform.schema import predict def predict_image_classification_sample( @@ -39,13 +37,13 @@ def predict_image_classification_sample( # The format of each instance should conform to the deployed model's prediction input schema. encoded_content = base64.b64encode(file_content).decode("utf-8") - instance_obj = instance.ImageClassificationPredictionInstance( + instance_obj = predict.instance.ImageClassificationPredictionInstance( content=encoded_content) instance_val = instance_obj.to_value() instances = [instance_val] - params_obj = params.ImageClassificationPredictionParams( + params_obj = predict.params.ImageClassificationPredictionParams( confidence_threshold=0.5, max_predictions=5) endpoint = client.endpoint_path( @@ -59,7 +57,7 @@ def predict_image_classification_sample( # See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml for the format of the predictions. 
predictions = response.predictions for prediction_ in predictions: - prediction_obj = prediction.ClassificationPredictionResult.from_map(prediction_) + prediction_obj = predict.prediction.ClassificationPredictionResult.from_map(prediction_) print(prediction_obj) From 624a08d65c2088c0d5272a7b1b88983a8c7e6284 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 22 Dec 2020 11:17:08 -0800 Subject: [PATCH 27/34] feat: update create_training_pipeline samples (#142) * feat: update create_training_pipeline samples. Use the schema types. --- .sample_configs/process_configs.yaml | 53 +++++++------------ ...ng_pipeline_image_classification_sample.py | 9 ++-- ..._pipeline_image_object_detection_sample.py | 15 +++--- ...ing_pipeline_text_classification_sample.py | 9 ++-- ..._pipeline_text_entity_extraction_sample.py | 7 ++- ...pipeline_text_sentiment_analysis_sample.py | 9 ++-- ...ipeline_video_action_recognition_sample.py | 13 +++-- ...ng_pipeline_video_classification_sample.py | 9 ++-- ...g_pipeline_video_object_tracking_sample.py | 9 ++-- 9 files changed, 59 insertions(+), 74 deletions(-) diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml index 7001e974ac..432abcd68d 100644 --- a/.sample_configs/process_configs.yaml +++ b/.sample_configs/process_configs.yaml @@ -69,48 +69,33 @@ create_hyperparameter_tuning_job_sample: {} create_specialist_pool_sample: {} create_training_pipeline_custom_job_sample: {} create_training_pipeline_custom_training_managed_dataset_sample: {} -create_training_pipeline_entity_extraction_sample: {} -create_training_pipeline_image_classification_sample: {} -create_training_pipeline_image_object_detection_sample: {} +create_training_pipeline_image_classification_sample: + schema_types: + training_task_inputs_dict: trainingjob.definition.AutoMlImageClassificationInputs +create_training_pipeline_image_object_detection_sample: + schema_types: + training_task_inputs_dict: 
trainingjob.definition.AutoMlImageObjectDetectionInputs create_training_pipeline_sample: {} create_training_pipeline_tabular_classification_sample: {} create_training_pipeline_tabular_regression_sample: {} -create_training_pipeline_text_classification_sample: {} +create_training_pipeline_text_classification_sample: + schema_types: + training_task_inputs_dict: trainingjob.definition.AutoMlTextClassificationInputs create_training_pipeline_text_entity_extraction_sample: - skip: - - predict_schemata - - supported_export_formats - - container_spec - - deployed_models - - explanation_spec + schema_types: + training_task_inputs_dict: trainingjob.definition.AutoMlTextExtractionInputs create_training_pipeline_text_sentiment_analysis_sample: - skip: - - predict_schemata - - supported_export_formats - - container_spec - - deployed_models - - explanation_spec + schema_types: + training_task_inputs_dict: trainingjob.definition.AutoMlTextSentimentInputs create_training_pipeline_video_action_recognition_sample: - skip: - - predict_schemata - - supported_export_formats - - container_spec - - deployed_models - - explanation_spec + schema_types: + training_task_inputs_dict: trainingjob.definition.AutoMlVideoActionRecognitionInputs create_training_pipeline_video_classification_sample: - skip: - - predict_schemata - - supported_export_formats - - container_spec - - deployed_models - - explanation_spec + schema_types: + training_task_inputs_dict: trainingjob.definition.AutoMlVideoClassificationInputs create_training_pipeline_video_object_tracking_sample: - skip: - - predict_schemata - - supported_export_formats - - container_spec - - deployed_models - - explanation_spec + schema_types: + training_task_inputs_dict: trainingjob.definition.AutoMlVideoObjectTrackingInputs delete_batch_prediction_job_sample: {} delete_custom_job_sample: {} delete_data_labeling_job_sample: {} diff --git a/samples/snippets/create_training_pipeline_image_classification_sample.py 
b/samples/snippets/create_training_pipeline_image_classification_sample.py index 9186e498d9..dddd3eb5cf 100644 --- a/samples/snippets/create_training_pipeline_image_classification_sample.py +++ b/samples/snippets/create_training_pipeline_image_classification_sample.py @@ -30,14 +30,13 @@ def create_training_pipeline_image_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - - icn_training_inputs = trainingjob.definition.AutoMlImageClassificationInputs( + training_task_inputs_object = trainingjob.definition.AutoMlImageClassificationInputs( multi_label=True, - model_type=trainingjob.definition.AutoMlImageClassificationInputs.ModelType.CLOUD, + model_type="CLOUD", budget_milli_node_hours=8000, - disable_early_stopping=False + disable_early_stopping=False, ) - training_task_inputs = icn_training_inputs.to_value() + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_image_object_detection_sample.py b/samples/snippets/create_training_pipeline_image_object_detection_sample.py index 9337e82c0d..bb6d243ca2 100644 --- a/samples/snippets/create_training_pipeline_image_object_detection_sample.py +++ b/samples/snippets/create_training_pipeline_image_object_detection_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_image_object_detection_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_image_object_detection_sample( @@ -31,12 +30,12 @@ def create_training_pipeline_image_object_detection_sample( # Initialize client that will be used to create and send requests. 
# This client only needs to be created once, and can be reused for multiple requests. client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_dict = { - "modelType": "CLOUD_HIGH_ACCURACY_1", - "budgetMilliNodeHours": 20000, - "disableEarlyStopping": False, - } - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + training_task_inputs_object = trainingjob.definition.AutoMlImageObjectDetectionInputs( + model_type="CLOUD_HIGH_ACCURACY_1", + budget_milli_node_hours=20000, + disable_early_stopping=False, + ) + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_text_classification_sample.py b/samples/snippets/create_training_pipeline_text_classification_sample.py index f18579d659..95de59dfba 100644 --- a/samples/snippets/create_training_pipeline_text_classification_sample.py +++ b/samples/snippets/create_training_pipeline_text_classification_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_text_classification_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_text_classification_sample( @@ -31,8 +30,10 @@ def create_training_pipeline_text_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_dict = {} - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + training_task_inputs_object = ( + trainingjob.definition.AutoMlTextClassificationInputs() + ) + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py index 10ee43dc64..131a2aba77 100644 --- a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py +++ b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_text_entity_extraction_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_text_entity_extraction_sample( @@ -31,8 +30,8 @@ def create_training_pipeline_text_entity_extraction_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_dict = {} - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + training_task_inputs_object = trainingjob.definition.AutoMlTextExtractionInputs() + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py index 4ac221fe5d..b12cd95fd7 100644 --- a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py +++ b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_text_sentiment_analysis_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_text_sentiment_analysis_sample( @@ -32,8 +31,10 @@ def create_training_pipeline_text_sentiment_analysis_sample( # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) # Use sentiment_max of 4 - training_task_inputs_dict = {"sentiment_max": 4} - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + training_task_inputs_object = trainingjob.definition.AutoMlTextSentimentInputs( + sentiment_max=4 + ) + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py index facc8c1afc..5de1ecba9c 100644 --- a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py +++ b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_video_action_recognition_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_video_action_recognition_sample( @@ -32,11 +31,11 @@ def create_training_pipeline_video_action_recognition_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_dict = { - # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1' - "modelType": model_type - } - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1' + training_task_inputs_object = trainingjob.definition.AutoMlVideoActionRecognitionInputs( + model_type=model_type + ) + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_video_classification_sample.py b/samples/snippets/create_training_pipeline_video_classification_sample.py index 6ff8c03ac6..f404ed62d9 100644 --- a/samples/snippets/create_training_pipeline_video_classification_sample.py +++ b/samples/snippets/create_training_pipeline_video_classification_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_video_classification_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_video_classification_sample( @@ -31,8 +30,10 @@ def create_training_pipeline_video_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_dict = {} - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + training_task_inputs_object = ( + trainingjob.definition.AutoMlVideoClassificationInputs() + ) + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py index 52cfb4714a..562627f97c 100644 --- a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py +++ b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py @@ -14,8 +14,7 @@ # [START aiplatform_create_training_pipeline_video_object_tracking_sample] from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.schema import trainingjob def create_training_pipeline_video_object_tracking_sample( @@ -31,8 +30,10 @@ def create_training_pipeline_video_object_tracking_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_dict = {"modelType": "CLOUD"} - training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value()) + training_task_inputs_object = trainingjob.definition.AutoMlVideoObjectTrackingInputs( + model_type="CLOUD" + ) + training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, From 69fc7fd415e1b404530fd3e2881a94c0441791cf Mon Sep 17 00:00:00 2001 From: Ivan Cheung Date: Wed, 23 Dec 2020 05:58:01 +0900 Subject: [PATCH 28/34] feat: Added tabular forecasting samples (#128) * Added predict, get_model_evaluation and create_training_pipeline samples for AutoML Forecasting * Added param handlers * Added headers manually * fix: Improved forecasting sample * Added forecasting test * Added tests for predict and get_model_evaluation * fix: Fixed create_training_pipeline_sample * feat: Added list_model_evaluations_tabular_forecasting_sample and test, fixed get_model_evaluation_tabular_forecasting_sample, and fixed create_training_pipeline_tabular_forecasting_sample * fix: Reverted back to generated BUILD_SPECIFIC_GCLOUD_PROJECT * fix: Fixed name of test * fix: Fixed lint errors * fix: Fixed assertion * fix: Removed predict samples * Consolidated samples * fix: Removed list_model_evaluations_tabular_forecasting * fix: tweaks Co-authored-by: Ivan Cheung --- ...ing_pipeline_tabular_forecasting_sample.py | 80 +++++++++++++++++ .sample_configs/process_configs.yaml | 5 ++ .sample_configs/variants.yaml | 2 + ...ing_pipeline_tabular_forecasting_sample.py | 90 +++++++++++++++++++ ...ipeline_tabular_forecasting_sample_test.py | 87 ++++++++++++++++++ 5 files changed, 264 insertions(+) create mode 100644 .sample_configs/param_handlers/create_training_pipeline_tabular_forecasting_sample.py create mode 100644 samples/snippets/create_training_pipeline_tabular_forecasting_sample.py create mode 100644 
samples/snippets/create_training_pipeline_tabular_forecasting_sample_test.py diff --git a/.sample_configs/param_handlers/create_training_pipeline_tabular_forecasting_sample.py b/.sample_configs/param_handlers/create_training_pipeline_tabular_forecasting_sample.py new file mode 100644 index 0000000000..5c0f63a781 --- /dev/null +++ b/.sample_configs/param_handlers/create_training_pipeline_tabular_forecasting_sample.py @@ -0,0 +1,80 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +def make_parent(parent: str) -> str: + parent = parent + + return parent + + +def make_training_pipeline( + display_name: str, + dataset_id: str, + model_display_name: str, + target_column: str, + time_series_identifier_column: str, + time_column: str, + static_columns: str, + time_variant_past_only_columns: str, + time_variant_past_and_future_columns: str, + forecast_window_end: int, +) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: + # set the columns used for training and their data types + transformations = [ + {"auto": {"column_name": "date"}}, + {"auto": {"column_name": "state_name"}}, + {"auto": {"column_name": "county_fips_code"}}, + {"auto": {"column_name": "confirmed_cases"}}, + {"auto": {"column_name": "deaths"}}, + ] + + period = {"unit": "day", "quantity": 1} + + # the inputs should be formatted according to the training_task_definition yaml file + training_task_inputs_dict = { + # required inputs + "targetColumn": target_column, + "timeSeriesIdentifierColumn": time_series_identifier_column, + "timeColumn": time_column, + "transformations": transformations, + "period": period, + "optimizationObjective": "minimize-rmse", + "trainBudgetMilliNodeHours": 8000, + "staticColumns": static_columns, + "timeVariantPastOnlyColumns": time_variant_past_only_columns, + "timeVariantPastAndFutureColumns": time_variant_past_and_future_columns, + "forecastWindowEnd": forecast_window_end, + } + + training_task_inputs = to_protobuf_value(training_task_inputs_dict) + + training_pipeline = { + "display_name": display_name, + "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_forecasting_1.0.0.yaml", + "training_task_inputs": training_task_inputs, + "input_data_config": { + "dataset_id": dataset_id, + "fraction_split": { + "training_fraction": 0.8, + "validation_fraction": 0.1, + "test_fraction": 0.1, + }, + }, + "model_to_upload": {"display_name": model_display_name}, + } + + return 
training_pipeline + diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml index 432abcd68d..882f2d864e 100644 --- a/.sample_configs/process_configs.yaml +++ b/.sample_configs/process_configs.yaml @@ -19,6 +19,7 @@ create_batch_prediction_job_custom_image_explain_sample: {} create_batch_prediction_job_custom_tabular_explain_sample: {} create_batch_prediction_job_sample: {} create_batch_prediction_job_tabular_explain_sample: {} +create_batch_prediction_job_tabular_forecasting_sample: {} create_batch_prediction_job_text_classification_sample: {} create_batch_prediction_job_text_entity_extraction_sample: {} create_batch_prediction_job_text_sentiment_analysis_sample: {} @@ -77,6 +78,7 @@ create_training_pipeline_image_object_detection_sample: training_task_inputs_dict: trainingjob.definition.AutoMlImageObjectDetectionInputs create_training_pipeline_sample: {} create_training_pipeline_tabular_classification_sample: {} +create_training_pipeline_tabular_forecasting_sample: {} create_training_pipeline_tabular_regression_sample: {} create_training_pipeline_text_classification_sample: schema_types: @@ -168,6 +170,7 @@ get_model_evaluation_sample: - model_explanation get_model_evaluation_slice_sample: {} get_model_evaluation_tabular_classification_sample: {} +get_model_evaluation_tabular_forecasting_sample: {} get_model_evaluation_tabular_regression_sample: {} get_model_evaluation_text_classification_sample: skip: @@ -232,6 +235,7 @@ list_endpoints_sample: {} list_hyperparameter_tuning_jobs_sample: {} list_model_evaluation_slices_sample: {} list_model_evaluations_sample: {} +list_model_evaluations_tabular_forecasting_sample: {} list_models_sample: {} list_specialist_pools_sample: {} list_training_pipelines_sample: {} @@ -274,6 +278,7 @@ predict_tabular_classification_sample: comments: predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tables_classification.yaml for the format of the predictions. 
+predict_tabular_forecasting_sample: {} predict_tabular_regression_sample: api_endpoint: us-central1-prediction-aiplatform.googleapis.com max_depth: 1 diff --git a/.sample_configs/variants.yaml b/.sample_configs/variants.yaml index 59a0fd78f5..0ef9cf7bc7 100644 --- a/.sample_configs/variants.yaml +++ b/.sample_configs/variants.yaml @@ -22,6 +22,7 @@ create_batch_prediction_job: - custom_image_explain - custom_tabular_explain - tabular_explain +- tabular_forecasting - text_classification - text_entity_extraction - text_sentiment_analysis @@ -59,6 +60,7 @@ create_training_pipeline: - image_classification - image_object_detection - tabular_classification +- tabular_forecasting - tabular_regression - text_classification - text_entity_extraction diff --git a/samples/snippets/create_training_pipeline_tabular_forecasting_sample.py b/samples/snippets/create_training_pipeline_tabular_forecasting_sample.py new file mode 100644 index 0000000000..5c04fccc79 --- /dev/null +++ b/samples/snippets/create_training_pipeline_tabular_forecasting_sample.py @@ -0,0 +1,90 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# [START aiplatform_create_training_pipeline_tabular_forecasting_sample] +from google.cloud import aiplatform +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value + + +def create_training_pipeline_tabular_forecasting_sample( + project: str, + display_name: str, + dataset_id: str, + model_display_name: str, + target_column: str, + time_series_identifier_column: str, + time_column: str, + static_columns: str, + time_variant_past_only_columns: str, + time_variant_past_and_future_columns: str, + forecast_window_end: int, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. + client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) + # set the columns used for training and their data types + transformations = [ + {"auto": {"column_name": "date"}}, + {"auto": {"column_name": "state_name"}}, + {"auto": {"column_name": "county_fips_code"}}, + {"auto": {"column_name": "confirmed_cases"}}, + {"auto": {"column_name": "deaths"}}, + ] + + period = {"unit": "day", "quantity": 1} + + # the inputs should be formatted according to the training_task_definition yaml file + training_task_inputs_dict = { + # required inputs + "targetColumn": target_column, + "timeSeriesIdentifierColumn": time_series_identifier_column, + "timeColumn": time_column, + "transformations": transformations, + "period": period, + "optimizationObjective": "minimize-rmse", + "trainBudgetMilliNodeHours": 8000, + "staticColumns": static_columns, + "timeVariantPastOnlyColumns": time_variant_past_only_columns, + "timeVariantPastAndFutureColumns": time_variant_past_and_future_columns, + "forecastWindowEnd": forecast_window_end, + } + + training_task_inputs = json_format.ParseDict(training_task_inputs_dict, 
Value()) + + training_pipeline = { + "display_name": display_name, + "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_forecasting_1.0.0.yaml", + "training_task_inputs": training_task_inputs, + "input_data_config": { + "dataset_id": dataset_id, + "fraction_split": { + "training_fraction": 0.8, + "validation_fraction": 0.1, + "test_fraction": 0.1, + }, + }, + "model_to_upload": {"display_name": model_display_name}, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_training_pipeline( + parent=parent, training_pipeline=training_pipeline + ) + print("response:", response) + + +# [END aiplatform_create_training_pipeline_tabular_forecasting_sample] diff --git a/samples/snippets/create_training_pipeline_tabular_forecasting_sample_test.py b/samples/snippets/create_training_pipeline_tabular_forecasting_sample_test.py new file mode 100644 index 0000000000..d5c58a7992 --- /dev/null +++ b/samples/snippets/create_training_pipeline_tabular_forecasting_sample_test.py @@ -0,0 +1,87 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from uuid import uuid4 + +from google.cloud import aiplatform +import pytest + +import cancel_training_pipeline_sample +import create_training_pipeline_tabular_forecasting_sample +import delete_training_pipeline_sample +import helpers + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +DATASET_ID = "3003302817130610688" # COVID Dataset +DISPLAY_NAME = f"temp_create_training_pipeline_test_{uuid4()}" +TARGET_COLUMN = "deaths" +PREDICTION_TYPE = "forecasting" + + +@pytest.fixture +def shared_state(): + state = {} + yield state + + +@pytest.fixture(scope="function", autouse=True) +def teardown(shared_state): + yield + + training_pipeline_id = shared_state["training_pipeline_name"].split("/")[-1] + + # Stop the training pipeline + cancel_training_pipeline_sample.cancel_training_pipeline_sample( + project=PROJECT_ID, training_pipeline_id=training_pipeline_id + ) + + client_options = {"api_endpoint": "us-central1-aiplatform.googleapis.com"} + pipeline_client = aiplatform.gapic.PipelineServiceClient( + client_options=client_options + ) + + # Waiting for training pipeline to be in CANCELLED state + helpers.wait_for_job_state( + get_job_method=pipeline_client.get_training_pipeline, + name=shared_state["training_pipeline_name"], + ) + + # Delete the training pipeline + delete_training_pipeline_sample.delete_training_pipeline_sample( + project=PROJECT_ID, training_pipeline_id=training_pipeline_id + ) + + +def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state): + + create_training_pipeline_tabular_forecasting_sample.create_training_pipeline_tabular_forecasting_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + dataset_id=DATASET_ID, + model_display_name="permanent_tabular_forecasting_model", + target_column=TARGET_COLUMN, + time_series_identifier_column="county", + time_column="date", + static_columns=["state_name"], + time_variant_past_only_columns=["deaths"], + time_variant_past_and_future_columns=["date"], + 
forecast_window_end=10, + ) + + out, _ = capsys.readouterr() + assert "response:" in out + + # Save resource name of the newly created training pipeline + shared_state["training_pipeline_name"] = helpers.get_name(out) From 7983b448158cf8166ada54c60fb896d5658a2162 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Wed, 23 Dec 2020 09:20:02 -0800 Subject: [PATCH 29/34] fix: predict image samples params (#150) --- .../predict_image_classification_sample.py | 4 +-- .../predict_image_object_detection_sample.py | 4 +-- .../predict_image_classification_sample.py | 27 +++++++++---------- .../predict_image_object_detection_sample.py | 2 +- 4 files changed, 17 insertions(+), 20 deletions(-) diff --git a/.sample_configs/param_handlers/predict_image_classification_sample.py b/.sample_configs/param_handlers/predict_image_classification_sample.py index ca8f00dc13..abecfd94dc 100644 --- a/.sample_configs/param_handlers/predict_image_classification_sample.py +++ b/.sample_configs/param_handlers/predict_image_classification_sample.py @@ -32,8 +32,8 @@ def make_instances(filename: str) -> typing.Sequence[google.protobuf.struct_pb2. def make_parameters() -> google.protobuf.struct_pb2.Value: # See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters. parameters_dict = { - "confidence_threshold": 0.5, - "max_predictions": 5 + "confidenceThreshold": 0.5, + "maxPredictions": 5 } parameters = to_protobuf_value(parameters_dict) diff --git a/.sample_configs/param_handlers/predict_image_object_detection_sample.py b/.sample_configs/param_handlers/predict_image_object_detection_sample.py index 975558e1ab..cd897bfa5d 100644 --- a/.sample_configs/param_handlers/predict_image_object_detection_sample.py +++ b/.sample_configs/param_handlers/predict_image_object_detection_sample.py @@ -32,8 +32,8 @@ def make_instances(filename: str) -> typing.Sequence[google.protobuf.struct_pb2. 
def make_parameters() -> google.protobuf.struct_pb2.Value: # See gs://google-cloud-aiplatform/schema/predict/params/image_object_detection_1.0.0.yaml for the format of the parameters. parameters_dict = { - "confidence_threshold": 0.5, - "max_predictions": 5 + "confidenceThreshold": 0.5, + "maxPredictions": 5 } parameters = to_protobuf_value(parameters_dict) diff --git a/samples/snippets/predict_image_classification_sample.py b/samples/snippets/predict_image_classification_sample.py index 126c21664b..958de5e156 100644 --- a/samples/snippets/predict_image_classification_sample.py +++ b/samples/snippets/predict_image_classification_sample.py @@ -16,7 +16,8 @@ import base64 from google.cloud import aiplatform -from google.cloud.aiplatform.schema import predict +from google.protobuf import json_format +from google.protobuf.struct_pb2 import Value def predict_image_classification_sample( @@ -36,29 +37,25 @@ def predict_image_classification_sample( # The format of each instance should conform to the deployed model's prediction input schema. encoded_content = base64.b64encode(file_content).decode("utf-8") + instance_dict = {"content": encoded_content} - instance_obj = predict.instance.ImageClassificationPredictionInstance( - content=encoded_content) - - instance_val = instance_obj.to_value() - instances = [instance_val] - - params_obj = predict.params.ImageClassificationPredictionParams( - confidence_threshold=0.5, max_predictions=5) - + instance = json_format.ParseDict(instance_dict, Value()) + instances = [instance] + # See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters. 
+ parameters_dict = {"confidenceThreshold": 0.5, "maxPredictions": 5} + parameters = json_format.ParseDict(parameters_dict, Value()) endpoint = client.endpoint_path( project=project, location=location, endpoint=endpoint_id ) response = client.predict( - endpoint=endpoint, instances=instances, parameters=params_obj + endpoint=endpoint, instances=instances, parameters=parameters ) print("response") - print("\tdeployed_model_id:", response.deployed_model_id) + print(" deployed_model_id:", response.deployed_model_id) # See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml for the format of the predictions. predictions = response.predictions - for prediction_ in predictions: - prediction_obj = predict.prediction.ClassificationPredictionResult.from_map(prediction_) - print(prediction_obj) + for prediction in predictions: + print(" prediction:", dict(prediction)) # [END aiplatform_predict_image_classification_sample] diff --git a/samples/snippets/predict_image_object_detection_sample.py b/samples/snippets/predict_image_object_detection_sample.py index 82561581b3..7b1f9afd1a 100644 --- a/samples/snippets/predict_image_object_detection_sample.py +++ b/samples/snippets/predict_image_object_detection_sample.py @@ -42,7 +42,7 @@ def predict_image_object_detection_sample( instance = json_format.ParseDict(instance_dict, Value()) instances = [instance] # See gs://google-cloud-aiplatform/schema/predict/params/image_object_detection_1.0.0.yaml for the format of the parameters. 
- parameters_dict = {"confidence_threshold": 0.5, "max_predictions": 5} + parameters_dict = {"confidenceThreshold": 0.5, "maxPredictions": 5} parameters = json_format.ParseDict(parameters_dict, Value()) endpoint = client.endpoint_path( project=project, location=location, endpoint=endpoint_id From e2577fe528be593616e018ce403e4e0df86c5057 Mon Sep 17 00:00:00 2001 From: Vinny Senthil Date: Tue, 5 Jan 2021 14:56:33 -0800 Subject: [PATCH 30/34] Update CODEOWNERS with GAPIC and SDK dev teams (#157) --- .github/CODEOWNERS | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bb1666f917..4910f51865 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,8 +7,14 @@ # yoshi-python is the default owner * @googleapis/yoshi-python +# The AI Platform GAPIC libraries are owned by Cloud AI DPE +/google/cloud/aiplatform* @googleapis/cdpe-cloudai + +# The AI Platform SDK is owned by Model Builder SDK Dev team +/google/cloud/aiplatform/* @googleapis/cloud-aiplatform-model-builder-sdk + # The python-samples-owners team is the default owner for samples /samples/**/*.py @dizcology @googleapis/python-samples-owners # The enhanced client library tests are owned by @telpirion -/tests/unit/enhanced_library/*.py @telpirion \ No newline at end of file +/tests/unit/enhanced_library/*.py @telpirion From a23857bc9be94c4a23ae7cf6f2eac75d8ea5ae95 Mon Sep 17 00:00:00 2001 From: Ivan Cheung Date: Wed, 6 Jan 2021 17:45:50 +0900 Subject: [PATCH 31/34] feat: Added tabular forecasting sample (#156) Co-authored-by: Ivan Cheung --- ...ediction_job_tabular_forecasting_sample.py | 28 ++++++++++ ...ediction_job_tabular_forecasting_sample.py | 54 +++++++++++++++++++ ...ion_job_tabular_forecasting_sample_test.py | 54 +++++++++++++++++++ 3 files changed, 136 insertions(+) create mode 100644 .sample_configs/param_handlers/create_batch_prediction_job_tabular_forecasting_sample.py create mode 100644 
samples/snippets/create_batch_prediction_job_tabular_forecasting_sample.py create mode 100644 samples/snippets/create_batch_prediction_job_tabular_forecasting_sample_test.py diff --git a/.sample_configs/param_handlers/create_batch_prediction_job_tabular_forecasting_sample.py b/.sample_configs/param_handlers/create_batch_prediction_job_tabular_forecasting_sample.py new file mode 100644 index 0000000000..d03f13dff1 --- /dev/null +++ b/.sample_configs/param_handlers/create_batch_prediction_job_tabular_forecasting_sample.py @@ -0,0 +1,28 @@ +def make_parent(parent: str) -> str: + parent = parent + + return parent + + +def make_batch_prediction_job( + display_name: str, + model_name: str, + gcs_source_uri: str, + gcs_destination_output_uri_prefix: str, + predictions_format: str, +) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob: + batch_prediction_job = { + "display_name": display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + "model": model_name, + "input_config": { + "instances_format": predictions_format, + "gcs_source": {"uris": [gcs_source_uri]}, + }, + "output_config": { + "predictions_format": predictions_format, + "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix}, + }, + } + return batch_prediction_job + diff --git a/samples/snippets/create_batch_prediction_job_tabular_forecasting_sample.py b/samples/snippets/create_batch_prediction_job_tabular_forecasting_sample.py new file mode 100644 index 0000000000..62eee08856 --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_tabular_forecasting_sample.py @@ -0,0 +1,54 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_create_batch_prediction_job_tabular_forecasting_sample] +from google.cloud import aiplatform + + +def create_batch_prediction_job_tabular_forecasting_sample( + project: str, + display_name: str, + model_name: str, + gcs_source_uri: str, + gcs_destination_output_uri_prefix: str, + predictions_format: str, + location: str = "us-central1", + api_endpoint: str = "us-central1-aiplatform.googleapis.com", +): + # The AI Platform services require regional API endpoints. + client_options = {"api_endpoint": api_endpoint} + # Initialize client that will be used to create and send requests. + # This client only needs to be created once, and can be reused for multiple requests. 
+ client = aiplatform.gapic.JobServiceClient(client_options=client_options) + batch_prediction_job = { + "display_name": display_name, + # Format: 'projects/{project}/locations/{location}/models/{model_id}' + "model": model_name, + "input_config": { + "instances_format": predictions_format, + "gcs_source": {"uris": [gcs_source_uri]}, + }, + "output_config": { + "predictions_format": predictions_format, + "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix}, + }, + } + parent = f"projects/{project}/locations/{location}" + response = client.create_batch_prediction_job( + parent=parent, batch_prediction_job=batch_prediction_job + ) + print("response:", response) + + +# [END aiplatform_create_batch_prediction_job_tabular_forecasting_sample] diff --git a/samples/snippets/create_batch_prediction_job_tabular_forecasting_sample_test.py b/samples/snippets/create_batch_prediction_job_tabular_forecasting_sample_test.py new file mode 100644 index 0000000000..f992cc575e --- /dev/null +++ b/samples/snippets/create_batch_prediction_job_tabular_forecasting_sample_test.py @@ -0,0 +1,54 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from uuid import uuid4 + +import pytest + +import create_batch_prediction_job_tabular_forecasting_sample +import helpers + +PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") +LOCATION = "us-central1" +MODEL_ID = "8531330622239539200" # Permanent restaurant rating model +DISPLAY_NAME = f"temp_create_batch_prediction_tabular_forecasting_test_{uuid4()}" +GCS_SOURCE_URI = "gs://cloud-samples-data/ai-platform/covid/bigquery-public-covid-nyt-us-counties-train.csv" +GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/" +PREDICTIONS_FORMAT = "csv" + + +@pytest.fixture(scope="function", autouse=True) +def teardown(teardown_batch_prediction_job): + yield + + +# Creating AutoML Tabular Forecasting Classification batch prediction job +def test_create_batch_prediction_job_tabular_forecasting_sample(capsys, shared_state): + + model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}" + + create_batch_prediction_job_tabular_forecasting_sample.create_batch_prediction_job_tabular_forecasting_sample( + project=PROJECT_ID, + display_name=DISPLAY_NAME, + model_name=model_name, + gcs_source_uri=GCS_SOURCE_URI, + gcs_destination_output_uri_prefix=GCS_OUTPUT_URI, + predictions_format=PREDICTIONS_FORMAT, + ) + + out, _ = capsys.readouterr() + + # Save resource name of the newly created batch prediction job + shared_state["batch_prediction_job_name"] = helpers.get_name(out) From 117ce24156b35ea5da772c1d68a1eb725e7faf5e Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Thu, 7 Jan 2021 13:13:34 -0700 Subject: [PATCH 32/34] test: add constraints file (#155) --- testing/constraints-3.10.txt | 0 testing/constraints-3.11.txt | 0 testing/constraints-3.6.txt | 12 ++++++++++++ testing/constraints-3.7.txt | 0 4 files changed, 12 insertions(+) create mode 100644 testing/constraints-3.10.txt create mode 100644 testing/constraints-3.11.txt create mode 100644 testing/constraints-3.6.txt create mode 100644 
testing/constraints-3.7.txt diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt new file mode 100644 index 0000000000..c8bff855a0 --- /dev/null +++ b/testing/constraints-3.6.txt @@ -0,0 +1,12 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List *all* library dependencies and extras in this file. +# Pin the version to the lower bound. +# +# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", +# Then this file should have foo==1.14.0 +google-api-core==1.22.2 +libcst==0.2.5 +proto-plus==1.10.1 +mock==4.0.2 +google-cloud-storage==1.26.0 \ No newline at end of file diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt new file mode 100644 index 0000000000..e69de29bb2 From 0c41ed2169f3f4d3313f864b7baee7aad5230e7b Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 8 Jan 2021 13:33:18 -0800 Subject: [PATCH 33/34] chore: aiplatform.schema -> aiplatform.gapic.schema (#158) --- .sample_configs/process_configs.yaml | 12 ++++++++++++ google/cloud/aiplatform/__init__.py | 3 +-- google/cloud/aiplatform/gapic/__init__.py | 1 + .../aiplatform/{ => gapic}/schema/__init__.py | 0 ...raining_pipeline_image_classification_sample.py | 7 +++---- ...ining_pipeline_image_object_detection_sample.py | 7 +++---- ...training_pipeline_tabular_forecasting_sample.py | 1 + ...training_pipeline_text_classification_sample.py | 7 +++---- ...ining_pipeline_text_entity_extraction_sample.py | 7 ++++--- ...ning_pipeline_text_sentiment_analysis_sample.py | 9 ++++----- ...ing_pipeline_video_action_recognition_sample.py | 11 +++++------ ...raining_pipeline_video_classification_sample.py | 7 +++---- ...aining_pipeline_video_object_tracking_sample.py | 9 ++++----- 
.../predict_image_classification_sample.py | 14 +++++++------- .../predict_image_object_detection_sample.py | 14 +++++++------- ...dict_text_classification_single_label_sample.py | 6 ++++-- .../predict_text_entity_extraction_sample.py | 6 ++++-- .../predict_text_sentiment_analysis_sample.py | 6 ++++-- 18 files changed, 70 insertions(+), 57 deletions(-) rename google/cloud/aiplatform/{ => gapic}/schema/__init__.py (100%) diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml index 882f2d864e..e89a42e4c8 100644 --- a/.sample_configs/process_configs.yaml +++ b/.sample_configs/process_configs.yaml @@ -250,6 +250,9 @@ predict_image_classification_sample: api_endpoint: us-central1-prediction-aiplatform.googleapis.com max_depth: 1 resource_name: endpoint + schema_types: + instance_dict: predict.instance.ImageClassificationPredictionInstance + parameters_dict: predict.params.ImageClassificationPredictionParams comments: predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml for the format of the predictions. @@ -261,6 +264,9 @@ predict_image_object_detection_sample: api_endpoint: us-central1-prediction-aiplatform.googleapis.com max_depth: 1 resource_name: endpoint + schema_types: + instance_dict: predict.instance.ImageObjectDetectionPredictionInstance + parameters_dict: predict.params.ImageObjectDetectionPredictionParams comments: predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection.yaml for the format of the predictions. @@ -290,6 +296,8 @@ predict_text_classification_single_label_sample: api_endpoint: us-central1-prediction-aiplatform.googleapis.com max_depth: 1 resource_name: endpoint + schema_types: + instance_dict: predict.instance.TextClassificationPredictionInstance comments: predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_classification.yaml for the format of the predictions. 
@@ -297,6 +305,8 @@ predict_text_entity_extraction_sample: api_endpoint: us-central1-prediction-aiplatform.googleapis.com max_depth: 1 resource_name: endpoint + schema_types: + instance_dict: predict.instance.TextExtractionPredictionInstance comments: predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_extraction.yaml for the format of the predictions. @@ -304,6 +314,8 @@ predict_text_sentiment_analysis_sample: api_endpoint: us-central1-prediction-aiplatform.googleapis.com max_depth: 1 resource_name: endpoint + schema_types: + instance_dict: predict.instance.TextSentimentPredictionInstance comments: predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_sentiment.yaml for the format of the predictions. diff --git a/google/cloud/aiplatform/__init__.py b/google/cloud/aiplatform/__init__.py index 2a18bec4cd..cc378079ab 100644 --- a/google/cloud/aiplatform/__init__.py +++ b/google/cloud/aiplatform/__init__.py @@ -16,7 +16,6 @@ # from google.cloud.aiplatform import gapic -from google.cloud.aiplatform import schema -__all__ = (gapic, schema) +__all__ = (gapic,) diff --git a/google/cloud/aiplatform/gapic/__init__.py b/google/cloud/aiplatform/gapic/__init__.py index 2691cce8e0..790ebeffdf 100644 --- a/google/cloud/aiplatform/gapic/__init__.py +++ b/google/cloud/aiplatform/gapic/__init__.py @@ -17,6 +17,7 @@ # The latest GAPIC version is exported to the google.cloud.aiplatform.gapic namespace. 
from google.cloud.aiplatform_v1beta1 import * +from google.cloud.aiplatform.gapic import schema from google.cloud import aiplatform_v1beta1 as v1beta1 diff --git a/google/cloud/aiplatform/schema/__init__.py b/google/cloud/aiplatform/gapic/schema/__init__.py similarity index 100% rename from google/cloud/aiplatform/schema/__init__.py rename to google/cloud/aiplatform/gapic/schema/__init__.py diff --git a/samples/snippets/create_training_pipeline_image_classification_sample.py b/samples/snippets/create_training_pipeline_image_classification_sample.py index dddd3eb5cf..0ce8fe97f0 100644 --- a/samples/snippets/create_training_pipeline_image_classification_sample.py +++ b/samples/snippets/create_training_pipeline_image_classification_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_image_classification_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_image_classification_sample( @@ -30,13 +30,12 @@ def create_training_pipeline_image_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_object = trainingjob.definition.AutoMlImageClassificationInputs( + training_task_inputs = trainingjob.definition.AutoMlImageClassificationInputs( multi_label=True, model_type="CLOUD", budget_milli_node_hours=8000, disable_early_stopping=False, - ) - training_task_inputs = training_task_inputs_object.to_value() + ).to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_image_object_detection_sample.py b/samples/snippets/create_training_pipeline_image_object_detection_sample.py index bb6d243ca2..50829f44f4 100644 --- a/samples/snippets/create_training_pipeline_image_object_detection_sample.py +++ b/samples/snippets/create_training_pipeline_image_object_detection_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_image_object_detection_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_image_object_detection_sample( @@ -30,12 +30,11 @@ def create_training_pipeline_image_object_detection_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_object = trainingjob.definition.AutoMlImageObjectDetectionInputs( + training_task_inputs = trainingjob.definition.AutoMlImageObjectDetectionInputs( model_type="CLOUD_HIGH_ACCURACY_1", budget_milli_node_hours=20000, disable_early_stopping=False, - ) - training_task_inputs = training_task_inputs_object.to_value() + ).to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_tabular_forecasting_sample.py b/samples/snippets/create_training_pipeline_tabular_forecasting_sample.py index 5c04fccc79..7d69cc1265 100644 --- a/samples/snippets/create_training_pipeline_tabular_forecasting_sample.py +++ b/samples/snippets/create_training_pipeline_tabular_forecasting_sample.py @@ -33,6 +33,7 @@ def create_training_pipeline_tabular_forecasting_sample( location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): + # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
diff --git a/samples/snippets/create_training_pipeline_text_classification_sample.py b/samples/snippets/create_training_pipeline_text_classification_sample.py index 95de59dfba..727173fd62 100644 --- a/samples/snippets/create_training_pipeline_text_classification_sample.py +++ b/samples/snippets/create_training_pipeline_text_classification_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_text_classification_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_text_classification_sample( @@ -30,10 +30,9 @@ def create_training_pipeline_text_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_object = ( - trainingjob.definition.AutoMlTextClassificationInputs() + training_task_inputs = ( + trainingjob.definition.AutoMlTextClassificationInputs().to_value() ) - training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py index 131a2aba77..6926a679a6 100644 --- a/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py +++ b/samples/snippets/create_training_pipeline_text_entity_extraction_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_text_entity_extraction_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_text_entity_extraction_sample( @@ -30,8 +30,9 @@ def 
create_training_pipeline_text_entity_extraction_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_object = trainingjob.definition.AutoMlTextExtractionInputs() - training_task_inputs = training_task_inputs_object.to_value() + training_task_inputs = ( + trainingjob.definition.AutoMlTextExtractionInputs().to_value() + ) training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py index b12cd95fd7..9386167ab2 100644 --- a/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py +++ b/samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_text_sentiment_analysis_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_text_sentiment_analysis_sample( @@ -31,10 +31,9 @@ def create_training_pipeline_text_sentiment_analysis_sample( # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) # Use sentiment_max of 4 - training_task_inputs_object = trainingjob.definition.AutoMlTextSentimentInputs( - sentiment_max=4 - ) - training_task_inputs = training_task_inputs_object.to_value() + training_task_inputs = trainingjob.definition.AutoMlTextSentimentInputs( + sentiment_max=4, + ).to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py index 5de1ecba9c..0b47bb9279 100644 --- a/samples/snippets/create_training_pipeline_video_action_recognition_sample.py +++ b/samples/snippets/create_training_pipeline_video_action_recognition_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_video_action_recognition_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_video_action_recognition_sample( @@ -31,11 +31,10 @@ def create_training_pipeline_video_action_recognition_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1' - training_task_inputs_object = trainingjob.definition.AutoMlVideoActionRecognitionInputs( - model_type=model_type - ) - training_task_inputs = training_task_inputs_object.to_value() + training_task_inputs = trainingjob.definition.AutoMlVideoActionRecognitionInputs( + # modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1' + model_type=model_type, + ).to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_video_classification_sample.py b/samples/snippets/create_training_pipeline_video_classification_sample.py index f404ed62d9..593ae1efad 100644 --- a/samples/snippets/create_training_pipeline_video_classification_sample.py +++ b/samples/snippets/create_training_pipeline_video_classification_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_video_classification_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_video_classification_sample( @@ -30,10 +30,9 @@ def create_training_pipeline_video_classification_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_object = ( - trainingjob.definition.AutoMlVideoClassificationInputs() + training_task_inputs = ( + trainingjob.definition.AutoMlVideoClassificationInputs().to_value() ) - training_task_inputs = training_task_inputs_object.to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py index 562627f97c..6d2a7ba8a7 100644 --- a/samples/snippets/create_training_pipeline_video_object_tracking_sample.py +++ b/samples/snippets/create_training_pipeline_video_object_tracking_sample.py @@ -14,7 +14,7 @@ # [START aiplatform_create_training_pipeline_video_object_tracking_sample] from google.cloud import aiplatform -from google.cloud.aiplatform.schema import trainingjob +from google.cloud.aiplatform.gapic.schema import trainingjob def create_training_pipeline_video_object_tracking_sample( @@ -30,10 +30,9 @@ def create_training_pipeline_video_object_tracking_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options) - training_task_inputs_object = trainingjob.definition.AutoMlVideoObjectTrackingInputs( - model_type="CLOUD" - ) - training_task_inputs = training_task_inputs_object.to_value() + training_task_inputs = trainingjob.definition.AutoMlVideoObjectTrackingInputs( + model_type="CLOUD", + ).to_value() training_pipeline = { "display_name": display_name, diff --git a/samples/snippets/predict_image_classification_sample.py b/samples/snippets/predict_image_classification_sample.py index 958de5e156..4c5080316f 100644 --- a/samples/snippets/predict_image_classification_sample.py +++ b/samples/snippets/predict_image_classification_sample.py @@ -16,8 +16,7 @@ import base64 from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.gapic.schema import predict def predict_image_classification_sample( @@ -37,13 +36,14 @@ def predict_image_classification_sample( # The format of each instance should conform to the deployed model's prediction input schema. encoded_content = base64.b64encode(file_content).decode("utf-8") - instance_dict = {"content": encoded_content} - - instance = json_format.ParseDict(instance_dict, Value()) + instance = predict.instance.ImageClassificationPredictionInstance( + content=encoded_content, + ).to_value() instances = [instance] # See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters. 
- parameters_dict = {"confidenceThreshold": 0.5, "maxPredictions": 5} - parameters = json_format.ParseDict(parameters_dict, Value()) + parameters = predict.params.ImageClassificationPredictionParams( + confidence_threshold=0.5, max_predictions=5, + ).to_value() endpoint = client.endpoint_path( project=project, location=location, endpoint=endpoint_id ) diff --git a/samples/snippets/predict_image_object_detection_sample.py b/samples/snippets/predict_image_object_detection_sample.py index 7b1f9afd1a..39a7c68f72 100644 --- a/samples/snippets/predict_image_object_detection_sample.py +++ b/samples/snippets/predict_image_object_detection_sample.py @@ -16,8 +16,7 @@ import base64 from google.cloud import aiplatform -from google.protobuf import json_format -from google.protobuf.struct_pb2 import Value +from google.cloud.aiplatform.gapic.schema import predict def predict_image_object_detection_sample( @@ -37,13 +36,14 @@ def predict_image_object_detection_sample( # The format of each instance should conform to the deployed model's prediction input schema. encoded_content = base64.b64encode(file_content).decode("utf-8") - instance_dict = {"content": encoded_content} - - instance = json_format.ParseDict(instance_dict, Value()) + instance = predict.instance.ImageObjectDetectionPredictionInstance( + content=encoded_content, + ).to_value() instances = [instance] # See gs://google-cloud-aiplatform/schema/predict/params/image_object_detection_1.0.0.yaml for the format of the parameters. 
- parameters_dict = {"confidenceThreshold": 0.5, "maxPredictions": 5} - parameters = json_format.ParseDict(parameters_dict, Value()) + parameters = predict.params.ImageObjectDetectionPredictionParams( + confidence_threshold=0.5, max_predictions=5, + ).to_value() endpoint = client.endpoint_path( project=project, location=location, endpoint=endpoint_id ) diff --git a/samples/snippets/predict_text_classification_single_label_sample.py b/samples/snippets/predict_text_classification_single_label_sample.py index 3cd9e74e6f..3cf6b5a70b 100644 --- a/samples/snippets/predict_text_classification_single_label_sample.py +++ b/samples/snippets/predict_text_classification_single_label_sample.py @@ -14,6 +14,7 @@ # [START aiplatform_predict_text_classification_single_label_sample] from google.cloud import aiplatform +from google.cloud.aiplatform.gapic.schema import predict from google.protobuf import json_format from google.protobuf.struct_pb2 import Value @@ -30,8 +31,9 @@ def predict_text_classification_single_label_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PredictionServiceClient(client_options=client_options) - instance_dict = {"content": content} - instance = json_format.ParseDict(instance_dict, Value()) + instance = predict.instance.TextClassificationPredictionInstance( + content=content, + ).to_value() instances = [instance] parameters_dict = {} parameters = json_format.ParseDict(parameters_dict, Value()) diff --git a/samples/snippets/predict_text_entity_extraction_sample.py b/samples/snippets/predict_text_entity_extraction_sample.py index ae7b352af7..30585deeaa 100644 --- a/samples/snippets/predict_text_entity_extraction_sample.py +++ b/samples/snippets/predict_text_entity_extraction_sample.py @@ -14,6 +14,7 @@ # [START aiplatform_predict_text_entity_extraction_sample] from google.cloud import aiplatform +from google.cloud.aiplatform.gapic.schema import predict from google.protobuf import json_format from google.protobuf.struct_pb2 import Value @@ -31,8 +32,9 @@ def predict_text_entity_extraction_sample( # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PredictionServiceClient(client_options=client_options) # The format of each instance should conform to the deployed model's prediction input schema - instance_dict = {"content": content} - instance = json_format.ParseDict(instance_dict, Value()) + instance = predict.instance.TextExtractionPredictionInstance( + content=content, + ).to_value() instances = [instance] parameters_dict = {} parameters = json_format.ParseDict(parameters_dict, Value()) diff --git a/samples/snippets/predict_text_sentiment_analysis_sample.py b/samples/snippets/predict_text_sentiment_analysis_sample.py index 2aac58d984..6e3c26913c 100644 --- a/samples/snippets/predict_text_sentiment_analysis_sample.py +++ b/samples/snippets/predict_text_sentiment_analysis_sample.py @@ -14,6 +14,7 @@ # [START aiplatform_predict_text_sentiment_analysis_sample] from google.cloud import aiplatform +from google.cloud.aiplatform.gapic.schema import predict from google.protobuf import json_format from google.protobuf.struct_pb2 import Value @@ -30,8 +31,9 @@ def predict_text_sentiment_analysis_sample( # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. 
client = aiplatform.gapic.PredictionServiceClient(client_options=client_options) - instance_dict = {"content": content} - instance = json_format.ParseDict(instance_dict, Value()) + instance = predict.instance.TextSentimentPredictionInstance( + content=content, + ).to_value() instances = [instance] parameters_dict = {} parameters = json_format.ParseDict(parameters_dict, Value()) From 7650269d47e3a3bd0ea2fddcdfc5c622ba16f8ee Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 13 Jan 2021 10:43:46 -0800 Subject: [PATCH 34/34] chore: release 0.4.0 (#73) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- CHANGELOG.md | 32 ++++++++++++++++++++++++++++++++ setup.py | 2 +- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a45ad41cff..7e2857250b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,37 @@ # Changelog +## [0.4.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.3.1...v0.4.0) (2021-01-08) + + +### Features + +* add create_batch_prediction_job samples ([#67](https://www.github.com/googleapis/python-aiplatform/issues/67)) ([96a850f](https://www.github.com/googleapis/python-aiplatform/commit/96a850f2d24d7ae95f2cdec83a56362abecb85a2)) +* add create_hyperparameter_tuning_job_python_package sample ([#76](https://www.github.com/googleapis/python-aiplatform/issues/76)) ([5155dee](https://www.github.com/googleapis/python-aiplatform/commit/5155dee5edd86fb700a91dfca01bddd4d6393410)) +* add create_training_pipeline_custom_training_managed_dataset sample ([#75](https://www.github.com/googleapis/python-aiplatform/issues/75)) ([b012283](https://www.github.com/googleapis/python-aiplatform/commit/b012283c08cf8abc2974dc73ff7c2d3b8112a16b)) +* add custom_job samples ([#69](https://www.github.com/googleapis/python-aiplatform/issues/69)) 
([fb165b3](https://www.github.com/googleapis/python-aiplatform/commit/fb165b3632119b361a1936f367128f7146b49685)) +* add data_labeling samples ([#78](https://www.github.com/googleapis/python-aiplatform/issues/78)) ([7daacd5](https://www.github.com/googleapis/python-aiplatform/commit/7daacd576dc96149c05e2908f276831337076316)) +* add get_custom_job and get_hyperparameter_tuning_job samples ([#68](https://www.github.com/googleapis/python-aiplatform/issues/68)) ([26da7a7](https://www.github.com/googleapis/python-aiplatform/commit/26da7a7d4c1f5db2d2c3b2faedccbd9899c14a47)) +* add schema namespace ([#140](https://www.github.com/googleapis/python-aiplatform/issues/140)) ([1cbd4a5](https://www.github.com/googleapis/python-aiplatform/commit/1cbd4a553fb8d035f687247ce87843167bf106ad)) +* add video action recognition samples ([#77](https://www.github.com/googleapis/python-aiplatform/issues/77)) ([4c60ad6](https://www.github.com/googleapis/python-aiplatform/commit/4c60ad67dcd9026cb989d6e81dec4813cbae962f)) +* Added tabular forecasting sample ([#156](https://www.github.com/googleapis/python-aiplatform/issues/156)) ([a23857b](https://www.github.com/googleapis/python-aiplatform/commit/a23857bc9be94c4a23ae7cf6f2eac75d8ea5ae95)) +* Added tabular forecasting samples ([#128](https://www.github.com/googleapis/python-aiplatform/issues/128)) ([69fc7fd](https://www.github.com/googleapis/python-aiplatform/commit/69fc7fd415e1b404530fd3e2881a94c0441791cf)) +* adds function/method enhancements, demo samples ([#122](https://www.github.com/googleapis/python-aiplatform/issues/122)) ([1a302d2](https://www.github.com/googleapis/python-aiplatform/commit/1a302d232d868a96bf6a41cbf92a550edcdb0673)) +* adds text batch prediction samples ([#82](https://www.github.com/googleapis/python-aiplatform/issues/82)) ([ad09c29](https://www.github.com/googleapis/python-aiplatform/commit/ad09c29c1685a904966e34894c1c4ea77baa2425)) +* initial generation of enhanced types 
([#102](https://www.github.com/googleapis/python-aiplatform/issues/102)) ([5ddbf16](https://www.github.com/googleapis/python-aiplatform/commit/5ddbf16f35234dc1781de9d17310a345ac1524de)) +* update create_training_pipeline samples ([#142](https://www.github.com/googleapis/python-aiplatform/issues/142)) ([624a08d](https://www.github.com/googleapis/python-aiplatform/commit/624a08d65c2088c0d5272a7b1b88983a8c7e6284)) +* xai samples ([#83](https://www.github.com/googleapis/python-aiplatform/issues/83)) ([5cf3859](https://www.github.com/googleapis/python-aiplatform/commit/5cf38596d115da63cdddc8958b6ae8f455bdb9a6)) + + +### Bug Fixes + +* blacken on library, test files ([#135](https://www.github.com/googleapis/python-aiplatform/issues/135)) ([53cdbab](https://www.github.com/googleapis/python-aiplatform/commit/53cdbabdef6bd10488f49d0c3ed6f05149af32a6)) +* predict image samples params ([#150](https://www.github.com/googleapis/python-aiplatform/issues/150)) ([7983b44](https://www.github.com/googleapis/python-aiplatform/commit/7983b448158cf8166ada54c60fb896d5658a2162)) + + +### Documentation + +* update readme ([#81](https://www.github.com/googleapis/python-aiplatform/issues/81)) ([19dc31a](https://www.github.com/googleapis/python-aiplatform/commit/19dc31a7e63ec112e9d0dc72e22db04910137d07)) + ### [0.3.1](https://www.github.com/googleapis/python-aiplatform/compare/v0.3.0...v0.3.1) (2020-11-13) diff --git a/setup.py b/setup.py index 7f5075f314..d03986fded 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ import setuptools # type: ignore name = "google-cloud-aiplatform" -version = "0.3.1" +version = "0.4.0" description = "Cloud AI Platform API client library" package_root = os.path.abspath(os.path.dirname(__file__))