@SpicySyntax
Created February 25, 2021 17:01
Airflow Workaround for Dataflow Flex Template Execution
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Dataflow Hook."""
import functools
import json
import re
import select
import shlex
import subprocess
import textwrap
import time
import uuid
import warnings
from copy import deepcopy
from tempfile import TemporaryDirectory
from typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Set, TypeVar, Union, cast
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
# from airflow.utils.python_virtualenv import prepare_virtualenv
from airflow.utils.timeout import timeout
# This is the default location
# https://cloud.google.com/dataflow/pipelines/specifying-exec-params
DEFAULT_DATAFLOW_LOCATION = "us-central1"
JOB_ID_PATTERN = re.compile(
r"Submitted job: (?P<job_id_java>.*)|Created job with id: \[(?P<job_id_python>.*)\]"
)
T = TypeVar("T", bound=Callable) # pylint: disable=invalid-name
def _fallback_variable_parameter(parameter_name: str, variable_key_name: str) -> Callable[[T], T]:
def _wrapper(func: T) -> T:
"""
Decorator that provides fallback for location from `region` key in `variables` parameters.
:param func: function to wrap
:return: result of the function call
"""
@functools.wraps(func)
def inner_wrapper(self: "DataflowHook", *args, **kwargs):
if args:
raise AirflowException(
"You must use keyword arguments in this methods rather than positional"
)
parameter_location = kwargs.get(parameter_name)
variables_location = kwargs.get("variables", {}).get(variable_key_name)
if parameter_location and variables_location:
raise AirflowException(
f"The mutually exclusive parameter `{parameter_name}` and `{variable_key_name}` key "
f"in `variables` parameter are both present. Please remove one."
)
if parameter_location or variables_location:
kwargs[parameter_name] = parameter_location or variables_location
if variables_location:
copy_variables = deepcopy(kwargs["variables"])
del copy_variables[variable_key_name]
kwargs["variables"] = copy_variables
return func(self, *args, **kwargs)
return cast(T, inner_wrapper)
return _wrapper
_fallback_to_location_from_variables = _fallback_variable_parameter("location", "region")
_fallback_to_project_id_from_variables = _fallback_variable_parameter("project_id", "project")
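# A minimal illustration (kept as a comment so it does not run at import time) of what the two
# fallback decorators above do, assuming a ``DataflowHook`` instance named ``hook``: the legacy
# ``region``/``project`` keys inside ``variables`` are moved into the dedicated ``location``/
# ``project_id`` keyword arguments, and supplying both forms at once raises AirflowException.
#
#   hook.is_job_dataflow_running(name="my-job", variables={"region": "europe-west1"})
#   # ...is rewritten by the decorators into...
#   hook.is_job_dataflow_running(name="my-job", variables={}, location="europe-west1")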
class DataflowJobStatus:
"""
Helper class with Dataflow job statuses.
Reference: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState
"""
JOB_STATE_DONE = "JOB_STATE_DONE"
JOB_STATE_UNKNOWN = "JOB_STATE_UNKNOWN"
JOB_STATE_STOPPED = "JOB_STATE_STOPPED"
JOB_STATE_RUNNING = "JOB_STATE_RUNNING"
JOB_STATE_FAILED = "JOB_STATE_FAILED"
JOB_STATE_CANCELLED = "JOB_STATE_CANCELLED"
JOB_STATE_UPDATED = "JOB_STATE_UPDATED"
JOB_STATE_DRAINING = "JOB_STATE_DRAINING"
JOB_STATE_DRAINED = "JOB_STATE_DRAINED"
JOB_STATE_PENDING = "JOB_STATE_PENDING"
JOB_STATE_CANCELLING = "JOB_STATE_CANCELLING"
JOB_STATE_QUEUED = "JOB_STATE_QUEUED"
FAILED_END_STATES = {JOB_STATE_FAILED, JOB_STATE_CANCELLED}
SUCCEEDED_END_STATES = {JOB_STATE_DONE, JOB_STATE_UPDATED, JOB_STATE_DRAINED}
TERMINAL_STATES = SUCCEEDED_END_STATES | FAILED_END_STATES
AWAITING_STATES = {
JOB_STATE_RUNNING,
JOB_STATE_PENDING,
JOB_STATE_QUEUED,
JOB_STATE_CANCELLING,
JOB_STATE_DRAINING,
JOB_STATE_STOPPED,
}
class DataflowJobType:
"""Helper class with Dataflow job types."""
JOB_TYPE_UNKNOWN = "JOB_TYPE_UNKNOWN"
JOB_TYPE_BATCH = "JOB_TYPE_BATCH"
JOB_TYPE_STREAMING = "JOB_TYPE_STREAMING"
class _DataflowJobsController(LoggingMixin):
"""
Interface for communication with the Google Dataflow API.
It does not use Apache Beam, only the Google Dataflow API.
:param dataflow: Discovery resource
:param project_number: The Google Cloud Project ID.
:param location: Job location.
:param poll_sleep: The status refresh rate for pending operations.
:param name: The Job ID prefix used when the multiple_jobs option is set to True.
:param job_id: ID of a single job.
:param num_retries: Maximum number of retries in case of connection problems.
:param multiple_jobs: If set to true, jobs will be looked up by name prefix (``name`` parameter)
rather than by a specific job ID, and actions will be performed on all matching jobs.
:param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
instead of canceling.
:param cancel_timeout: wait time in seconds for successful job canceling
:param wait_until_finished: If True, wait for the end of pipeline execution before exiting. If False,
it only submits the job and checks once whether the job is in a terminal state.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
"""
def __init__( # pylint: disable=too-many-arguments
self,
dataflow: Any,
project_number: str,
location: str,
poll_sleep: int = 10,
name: Optional[str] = None,
job_id: Optional[str] = None,
num_retries: int = 0,
multiple_jobs: bool = False,
drain_pipeline: bool = False,
cancel_timeout: Optional[int] = 5 * 60,
wait_until_finished: Optional[bool] = None,
) -> None:
super().__init__()
self._dataflow = dataflow
self._project_number = project_number
self._job_name = name
self._job_location = location
self._multiple_jobs = multiple_jobs
self._job_id = job_id
self._num_retries = num_retries
self._poll_sleep = poll_sleep
self._cancel_timeout = cancel_timeout
self.drain_pipeline = drain_pipeline
self._wait_until_finished = wait_until_finished
self._jobs: Optional[List[dict]] = None
def is_job_running(self) -> bool:
"""
Helper method to check if the job is still running in Dataflow
:return: True if job is running.
:rtype: bool
"""
self._refresh_jobs()
if not self._jobs:
return False
for job in self._jobs:
if job["currentState"] not in DataflowJobStatus.TERMINAL_STATES:
return True
return False
# pylint: disable=too-many-nested-blocks
def _get_current_jobs(self) -> List[dict]:
"""
Helper method to get the list of jobs that start with the job name or match the job ID
:return: list of jobs including their IDs
:rtype: list
"""
if not self._multiple_jobs and self._job_id:
return [self.fetch_job_by_id(self._job_id)]
elif self._job_name:
jobs = self._fetch_jobs_by_prefix_name(self._job_name.lower())
if len(jobs) == 1:
self._job_id = jobs[0]["id"]
return jobs
else:
raise Exception("Missing both dataflow job ID and name.")
def fetch_job_by_id(self, job_id: str) -> dict:
"""
Helper method to fetch the job with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:return: the Job
:rtype: dict
"""
return (
self._dataflow.projects()
.locations()
.jobs()
.get(
projectId=self._project_number,
location=self._job_location,
jobId=job_id,
)
.execute(num_retries=self._num_retries)
)
def fetch_job_metrics_by_id(self, job_id: str) -> dict:
"""
Helper method to fetch the job metrics with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:return: the JobMetrics. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/JobMetrics
:rtype: dict
"""
result = (
self._dataflow.projects()
.locations()
.jobs()
.getMetrics(projectId=self._project_number, location=self._job_location, jobId=job_id)
.execute(num_retries=self._num_retries)
)
self.log.debug("fetch_job_metrics_by_id %s:\n%s", job_id, result)
return result
def _fetch_list_job_messages_responses(self, job_id: str) -> Generator[dict, None, None]:
"""
Helper method to fetch ListJobMessagesResponse with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:return: yields the ListJobMessagesResponse. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse
:rtype: Generator[dict, None, None]
"""
request = (
self._dataflow.projects()
.locations()
.jobs()
.messages()
.list(projectId=self._project_number, location=self._job_location, jobId=job_id)
)
while request is not None:
response = request.execute(num_retries=self._num_retries)
yield response
request = (
self._dataflow.projects()
.locations()
.jobs()
.messages()
.list_next(previous_request=request, previous_response=response)
)
def fetch_job_messages_by_id(self, job_id: str) -> List[dict]:
"""
Helper method to fetch the job messages with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:return: the list of JobMessages. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#JobMessage
:rtype: List[dict]
"""
messages: List[dict] = []
for response in self._fetch_list_job_messages_responses(job_id=job_id):
messages.extend(response.get("jobMessages", []))
return messages
def fetch_job_autoscaling_events_by_id(self, job_id: str) -> List[dict]:
"""
Helper method to fetch the job autoscaling events with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:return: the list of AutoscalingEvents. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#autoscalingevent
:rtype: List[dict]
"""
autoscaling_events: List[dict] = []
for response in self._fetch_list_job_messages_responses(job_id=job_id):
autoscaling_events.extend(response.get("autoscalingEvents", []))
return autoscaling_events
def _fetch_all_jobs(self) -> List[dict]:
request = (
self._dataflow.projects()
.locations()
.jobs()
.list(projectId=self._project_number, location=self._job_location)
)
jobs: List[dict] = []
while request is not None:
response = request.execute(num_retries=self._num_retries)
jobs.extend(response["jobs"])
request = (
self._dataflow.projects()
.locations()
.jobs()
.list_next(previous_request=request, previous_response=response)
)
return jobs
def _fetch_jobs_by_prefix_name(self, prefix_name: str) -> List[dict]:
jobs = self._fetch_all_jobs()
jobs = [job for job in jobs if job["name"].startswith(prefix_name)]
return jobs
def _refresh_jobs(self) -> None:
"""
Helper method to get all jobs by name
:return: jobs
:rtype: list
"""
self._jobs = self._get_current_jobs()
if self._jobs:
for job in self._jobs:
self.log.info(
"Google Cloud DataFlow job %s is state: %s",
job["name"],
job["currentState"],
)
else:
self.log.info("Google Cloud DataFlow job not available yet..")
def _check_dataflow_job_state(self, job) -> bool:
"""
Helper method to check the state of a single Dataflow job for this task;
raises an exception if the job has failed.
:return: True if job is done.
:rtype: bool
:raise: Exception
"""
if self._wait_until_finished is None:
wait_for_running = job.get('type', '') == DataflowJobType.JOB_TYPE_STREAMING
else:
wait_for_running = not self._wait_until_finished
if job['currentState'] == DataflowJobStatus.JOB_STATE_DONE:
return True
elif job['currentState'] == DataflowJobStatus.JOB_STATE_FAILED:
raise Exception("Google Cloud Dataflow job {} has failed.".format(job['name']))
elif job['currentState'] == DataflowJobStatus.JOB_STATE_CANCELLED:
raise Exception("Google Cloud Dataflow job {} was cancelled.".format(job['name']))
elif job['currentState'] == DataflowJobStatus.JOB_STATE_DRAINED:
raise Exception("Google Cloud Dataflow job {} was drained.".format(job['name']))
elif job['currentState'] == DataflowJobStatus.JOB_STATE_UPDATED:
raise Exception("Google Cloud Dataflow job {} was updated.".format(job['name']))
elif job['currentState'] == DataflowJobStatus.JOB_STATE_RUNNING and wait_for_running:
return True
elif job['currentState'] in DataflowJobStatus.AWAITING_STATES:
return self._wait_until_finished is False
self.log.debug("Current job: %s", str(job))
raise Exception(
"Google Cloud Dataflow job {} was unknown state: {}".format(job["name"], job["currentState"])
)
def wait_for_done(self) -> None:
"""Helper method to wait for result of submitted job."""
self.log.info("Start waiting for done.")
self._refresh_jobs()
while self._jobs and not all(self._check_dataflow_job_state(job) for job in self._jobs):
self.log.info("Waiting for done. Sleep %s s", self._poll_sleep)
time.sleep(self._poll_sleep)
self._refresh_jobs()
def get_jobs(self, refresh: bool = False) -> List[dict]:
"""
Returns Dataflow jobs.
:param refresh: Forces the latest data to be fetched.
:type refresh: bool
:return: list of jobs
:rtype: list
"""
if not self._jobs or refresh:
self._refresh_jobs()
if not self._jobs:
raise ValueError("Could not read _jobs")
return self._jobs
def _wait_for_states(self, expected_states: Set[str]):
"""Waiting for the jobs to reach a certain state."""
if not self._jobs:
raise ValueError("The _jobs should be set")
while True:
self._refresh_jobs()
job_states = {job['currentState'] for job in self._jobs}
if not job_states.difference(expected_states):
return
# failed end states that we are not explicitly waiting for
unexpected_failed_end_states = DataflowJobStatus.FAILED_END_STATES - expected_states
if unexpected_failed_end_states.intersection(job_states):
unexpected_failed_jobs = {
job for job in self._jobs if job['currentState'] in unexpected_failed_end_states
}
raise AirflowException(
"Jobs failed: "
+ ", ".join(
f"ID: {job['id']} name: {job['name']} state: {job['currentState']}"
for job in unexpected_failed_jobs
)
)
time.sleep(self._poll_sleep)
def cancel(self) -> None:
"""Cancels or drains current job"""
jobs = self.get_jobs()
job_ids = [job["id"] for job in jobs if job["currentState"] not in DataflowJobStatus.TERMINAL_STATES]
if job_ids:
batch = self._dataflow.new_batch_http_request()
self.log.info("Canceling jobs: %s", ", ".join(job_ids))
for job in jobs:
requested_state = (
DataflowJobStatus.JOB_STATE_DRAINED
if self.drain_pipeline and job["type"] == DataflowJobType.JOB_TYPE_STREAMING
else DataflowJobStatus.JOB_STATE_CANCELLED
)
batch.add(
self._dataflow.projects()
.locations()
.jobs()
.update(
projectId=self._project_number,
location=self._job_location,
jobId=job["id"],
body={"requestedState": requested_state},
)
)
batch.execute()
if self._cancel_timeout and isinstance(self._cancel_timeout, int):
timeout_error_message = "Canceling jobs failed due to timeout ({}s): {}".format(
self._cancel_timeout, ", ".join(job_ids)
)
with timeout(seconds=self._cancel_timeout, error_message=timeout_error_message):
self._wait_for_states({DataflowJobStatus.JOB_STATE_CANCELLED})
else:
self.log.info("No jobs to cancel")
class _DataflowRunner(LoggingMixin):
def __init__(
self,
cmd: List[str],
on_new_job_id_callback: Optional[Callable[[str], None]] = None,
) -> None:
super().__init__()
self.log.info("Running command: %s", " ".join(shlex.quote(c) for c in cmd))
self.on_new_job_id_callback = on_new_job_id_callback
self.job_id: Optional[str] = None
self._proc = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
def _process_fd(self, fd):
"""
Prints output to logs and looks up the job ID in each line.
:param fd: File descriptor.
"""
if fd == self._proc.stderr:
while True:
line = self._proc.stderr.readline().decode()
if not line:
return
self._process_line_and_extract_job_id(line)
self.log.warning(line.rstrip("\n"))
if fd == self._proc.stdout:
while True:
line = self._proc.stdout.readline().decode()
if not line:
return
self._process_line_and_extract_job_id(line)
self.log.info(line.rstrip("\n"))
raise Exception("No data in stderr or in stdout.")
def _process_line_and_extract_job_id(self, line: str) -> None:
"""
Extracts job_id.
:param line: log line from which job_id has to be extracted
:type line: str
"""
# Job id info: https://goo.gl/SE29y9.
matched_job = JOB_ID_PATTERN.search(line)
if matched_job:
job_id = matched_job.group("job_id_java") or matched_job.group("job_id_python")
self.log.info("Found Job ID: %s", job_id)
self.job_id = job_id
if self.on_new_job_id_callback:
self.on_new_job_id_callback(job_id)
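# Examples of the log lines JOB_ID_PATTERN is designed to match (the IDs below are illustrative
# placeholders, not real job IDs):
#   "Submitted job: 2021-02-25_09_01_23-1234567890123456789"         -> job_id_java group
#   "Created job with id: [2021-02-25_09_01_23-1234567890123456789]" -> job_id_python group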
def wait_for_done(self) -> Optional[str]:
"""
Waits for Dataflow job to complete.
:return: Job id
:rtype: Optional[str]
"""
self.log.info("Start waiting for DataFlow process to complete.")
self.job_id = None
reads = [self._proc.stderr, self._proc.stdout]
while True:
# Wait for at least one available fd.
readable_fds, _, _ = select.select(reads, [], [], 5)
if readable_fds is None:
self.log.info("Waiting for DataFlow process to complete.")
continue
for readable_fd in readable_fds:
self._process_fd(readable_fd)
if self._proc.poll() is not None:
break
# Corner case: check if more output was created between the last read and the process termination
for readable_fd in reads:
self._process_fd(readable_fd)
self.log.info("Process exited with return code: %s", self._proc.returncode)
if self._proc.returncode != 0:
raise Exception(f"DataFlow failed with return code {self._proc.returncode}")
return self.job_id
class DataflowHook(GoogleBaseHook):
"""
Hook for Google Dataflow.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
drain_pipeline: bool = False,
cancel_timeout: Optional[int] = 5 * 60,
wait_until_finished: Optional[bool] = None,
) -> None:
self.poll_sleep = poll_sleep
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
def get_conn(self) -> build:
"""Returns a Google Cloud Dataflow service object."""
http_authorized = self._authorize()
return build("dataflow", "v1b3", http=http_authorized, cache_discovery=False)
@GoogleBaseHook.provide_gcp_credential_file
def _start_dataflow(
self,
variables: dict,
name: str,
command_prefix: List[str],
project_id: str,
multiple_jobs: bool = False,
on_new_job_id_callback: Optional[Callable[[str], None]] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> None:
cmd = command_prefix + [
"--runner=DataflowRunner",
f"--project={project_id}",
]
if variables:
cmd.extend(self._options_to_args(variables))
runner = _DataflowRunner(cmd=cmd, on_new_job_id_callback=on_new_job_id_callback)
job_id = runner.wait_for_done()
job_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=name,
location=location,
poll_sleep=self.poll_sleep,
job_id=job_id,
num_retries=self.num_retries,
multiple_jobs=multiple_jobs,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
job_controller.wait_for_done()
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def start_java_dataflow(
self,
job_name: str,
variables: dict,
jar: str,
project_id: str,
job_class: Optional[str] = None,
append_job_name: bool = True,
multiple_jobs: bool = False,
on_new_job_id_callback: Optional[Callable[[str], None]] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> None:
"""
Starts Dataflow java job.
:param job_name: The name of the job.
:type job_name: str
:param variables: Variables passed to the job.
:type variables: dict
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param jar: Name of the jar for the job
:type jar: str
:param job_class: Name of the java class for the job.
:type job_class: str
:param append_job_name: True if unique suffix has to be appended to job name.
:type append_job_name: bool
:param multiple_jobs: True if the pipeline creates multiple jobs, so that all of them are monitored
:type multiple_jobs: bool
:param on_new_job_id_callback: Callback called when the job ID is known.
:type on_new_job_id_callback: callable
:param location: Job location.
:type location: str
"""
name = self._build_dataflow_job_name(job_name, append_job_name)
variables["jobName"] = name
variables["region"] = location
if "labels" in variables:
variables["labels"] = json.dumps(variables["labels"], separators=(",", ":"))
command_prefix = ["java", "-cp", jar, job_class] if job_class else ["java", "-jar", jar]
self._start_dataflow(
variables=variables,
name=name,
command_prefix=command_prefix,
project_id=project_id,
multiple_jobs=multiple_jobs,
on_new_job_id_callback=on_new_job_id_callback,
location=location,
)
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def start_template_dataflow(
self,
job_name: str,
variables: dict,
parameters: dict,
dataflow_template: str,
project_id: str,
append_job_name: bool = True,
on_new_job_id_callback: Optional[Callable[[str], None]] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
environment: Optional[dict] = None,
) -> dict:
"""
Starts Dataflow template job.
:param job_name: The name of the job.
:type job_name: str
:param variables: Map of job runtime environment options.
It will update environment argument if passed.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:type variables: dict
:param parameters: Parameters for the template
:type parameters: dict
:param dataflow_template: GCS path to the template.
:type dataflow_template: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param append_job_name: True if unique suffix has to be appended to job name.
:type append_job_name: bool
:param on_new_job_id_callback: Callback called when the job ID is known.
:type on_new_job_id_callback: callable
:param location: Job location.
:type location: str
:param environment: Optional, map of job runtime environment options.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:type environment: Optional[dict]
"""
name = self._build_dataflow_job_name(job_name, append_job_name)
environment = environment or {}
# available keys for runtime environment are listed here:
# https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
environment_keys = [
"numWorkers",
"maxWorkers",
"zone",
"serviceAccountEmail",
"tempLocation",
"bypassTempDirValidation",
"machineType",
"additionalExperiments",
"network",
"subnetwork",
"additionalUserLabels",
"kmsKeyName",
"ipConfiguration",
"workerRegion",
"workerZone",
]
for key in variables:
if key in environment_keys:
if key in environment:
self.log.warning(
"'%s' parameter in 'variables' will override of "
"the same one passed in 'environment'!",
key,
)
environment.update({key: variables[key]})
service = self.get_conn()
# pylint: disable=no-member
request = (
service.projects()
.locations()
.templates()
.launch(
projectId=project_id,
location=location,
gcsPath=dataflow_template,
body={
"jobName": name,
"parameters": parameters,
"environment": environment,
},
)
)
response = request.execute(num_retries=self.num_retries)
job_id = response["job"]["id"]
if on_new_job_id_callback:
on_new_job_id_callback(job_id)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=name,
job_id=job_id,
location=location,
poll_sleep=self.poll_sleep,
num_retries=self.num_retries,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
)
jobs_controller.wait_for_done()
return response["job"]
@GoogleBaseHook.fallback_to_default_project_id
def start_flex_template(
self,
body: dict,
location: str,
project_id: str,
on_new_job_id_callback: Optional[Callable[[str], None]] = None,
):
"""
Starts flex templates with the Dataflow pipeline.
:param body: The request body. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.locations.flexTemplates/launch#request-body
:param location: The location of the Dataflow job (for example europe-west1)
:type location: str
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param on_new_job_id_callback: A callback that is called when a Job ID is detected.
:return: the Job
"""
service = self.get_conn()
request = (
service.projects() # pylint: disable=no-member
.locations()
.flexTemplates()
.launch(projectId=project_id, body=body, location=location)
)
response = request.execute(num_retries=self.num_retries)
job_id = response["job"]["id"]
if on_new_job_id_callback:
on_new_job_id_callback(job_id)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
job_id=job_id,
location=location,
poll_sleep=self.poll_sleep,
num_retries=self.num_retries,
cancel_timeout=self.cancel_timeout,
)
jobs_controller.wait_for_done()
return jobs_controller.get_jobs(refresh=True)[0]
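# A hedged sketch (kept as a comment) of how ``start_flex_template`` might be driven directly,
# assuming a flex template container spec already uploaded to GCS; the bucket, template path and
# parameter names below are placeholders, not part of this module:
#
#   body = {
#       "launchParameter": {
#           "jobName": "example-flex-job",
#           "containerSpecGcsPath": "gs://my-bucket/templates/my_template.json",
#           "parameters": {"inputSubscription": "...", "outputTable": "..."},
#       }
#   }
#   hook = DataflowHook(gcp_conn_id="google_cloud_default")
#   job = hook.start_flex_template(body=body, location="us-central1", project_id="my-project")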
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def start_python_dataflow( # pylint: disable=too-many-arguments
self,
job_name: str,
variables: dict,
dataflow: str,
py_options: List[str],
project_id: str,
py_interpreter: str = "python3",
py_requirements: Optional[List[str]] = None,
py_system_site_packages: bool = False,
append_job_name: bool = True,
on_new_job_id_callback: Optional[Callable[[str], None]] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
):
"""
Starts Dataflow job.
:param job_name: The name of the job.
:type job_name: str
:param variables: Variables passed to the job.
:type variables: Dict
:param dataflow: Name of the Dataflow process.
:type dataflow: str
:param py_options: Additional options.
:type py_options: List[str]
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param py_interpreter: Python version of the beam pipeline.
If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:param py_requirements: Additional python package(s) to install.
If a value is passed to this parameter, a new virtual environment will be created with the
additional packages installed.
You could also install the apache-beam package if it is not installed on your system or you want
to use a different version.
:type py_requirements: List[str]
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:type py_system_site_packages: bool
:param append_job_name: True if unique suffix has to be appended to job name.
:type append_job_name: bool
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param on_new_job_id_callback: Callback called when the job ID is known.
:type on_new_job_id_callback: callable
:param location: Job location.
:type location: str
"""
name = self._build_dataflow_job_name(job_name, append_job_name)
variables["job_name"] = name
variables["region"] = location
if "labels" in variables:
variables["labels"] = [f"{key}={value}" for key, value in variables["labels"].items()]
# if py_requirements is not None:
# if not py_requirements and not py_system_site_packages:
# warning_invalid_environment = textwrap.dedent(
# """\
# Invalid method invocation. You have disabled inclusion of system packages and empty list
# required for installation, so it is not possible to create a valid virtual environment.
# In the virtual environment, apache-beam package must be installed for your job to be \
# executed. To fix this problem:
# * install apache-beam on the system, then set parameter py_system_site_packages to True,
# * add apache-beam to the list of required packages in parameter py_requirements.
# """
# )
# raise AirflowException(warning_invalid_environment)
# with TemporaryDirectory(prefix="dataflow-venv") as tmp_dir:
# py_interpreter = prepare_virtualenv(
# venv_directory=tmp_dir,
# python_bin=py_interpreter,
# system_site_packages=py_system_site_packages,
# requirements=py_requirements,
# )
# command_prefix = [py_interpreter] + py_options + [dataflow]
# self._start_dataflow(
# variables=variables,
# name=name,
# command_prefix=command_prefix,
# project_id=project_id,
# on_new_job_id_callback=on_new_job_id_callback,
# location=location,
# )
# else:
command_prefix = [py_interpreter] + py_options + [dataflow]
self._start_dataflow(
variables=variables,
name=name,
command_prefix=command_prefix,
project_id=project_id,
on_new_job_id_callback=on_new_job_id_callback,
location=location,
)
@staticmethod
def _build_dataflow_job_name(job_name: str, append_job_name: bool = True) -> str:
base_job_name = str(job_name).replace("_", "-")
if not re.match(r"^[a-z]([-a-z0-9]*[a-z0-9])?$", base_job_name):
raise ValueError(
"Invalid job_name ({}); the name must consist of"
"only the characters [-a-z0-9], starting with a "
"letter and ending with a letter or number ".format(base_job_name)
)
if append_job_name:
safe_job_name = base_job_name + "-" + str(uuid.uuid4())[:8]
else:
safe_job_name = base_job_name
return safe_job_name
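# For example, ``_build_dataflow_job_name("my_flex_job")`` yields ``"my-flex-job-<8 hex chars>"``
# (underscores replaced with hyphens plus a truncated uuid4 suffix), while a name such as
# ``"MyJob"`` is rejected because the validation regex only accepts lowercase names.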
@staticmethod
def _options_to_args(variables: dict) -> List[str]:
if not variables:
return []
# The logic of this method should be compatible with Apache Beam:
# https://github.com/apache/beam/blob/b56740f0e8cd80c2873412847d0b336837429fb9/sdks/python/
# apache_beam/options/pipeline_options.py#L230-L251
args: List[str] = []
for attr, value in variables.items():
if value is None or (isinstance(value, bool) and value):
args.append(f"--{attr}")
elif isinstance(value, list):
args.extend([f"--{attr}={v}" for v in value])
else:
args.append(f"--{attr}={value}")
return args
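# For example, ``_options_to_args({"maxNumWorkers": 2, "experiments": ["a", "b"],
# "streaming": True, "flag": None})`` yields ``["--maxNumWorkers=2", "--experiments=a",
# "--experiments=b", "--streaming", "--flag"]``; a value of False falls into the last branch
# and is rendered as ``--attr=False``.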
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def is_job_dataflow_running(
self,
name: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
variables: Optional[dict] = None,
) -> bool:
"""
Helper method to check if the job is still running in Dataflow
:param name: The name of the job.
:type name: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:return: True if job is running.
:rtype: bool
"""
if variables:
warnings.warn(
"The variables parameter has been deprecated. You should pass location using "
"the location parameter.",
DeprecationWarning,
stacklevel=4,
)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=name,
location=location,
poll_sleep=self.poll_sleep,
drain_pipeline=self.drain_pipeline,
num_retries=self.num_retries,
cancel_timeout=self.cancel_timeout,
)
return jobs_controller.is_job_running()
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
project_id: str,
job_name: Optional[str] = None,
job_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> None:
"""
Cancels the job with the specified name prefix or Job ID.
Parameter ``name`` and ``job_id`` are mutually exclusive.
:param job_name: Name prefix specifying which jobs are to be canceled.
:type job_name: str
:param job_id: Job ID specifying which jobs are to be canceled.
:type job_id: str
:param location: Job location.
:type location: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=job_name,
job_id=job_id,
location=location,
poll_sleep=self.poll_sleep,
drain_pipeline=self.drain_pipeline,
num_retries=self.num_retries,
cancel_timeout=self.cancel_timeout,
)
jobs_controller.cancel()
@GoogleBaseHook.fallback_to_default_project_id
def start_sql_job(
self,
job_name: str,
query: str,
options: Dict[str, Any],
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
on_new_job_id_callback: Optional[Callable[[str], None]] = None,
):
"""
Starts Dataflow SQL query.
:param job_name: The unique name to assign to the Cloud Dataflow job.
:type job_name: str
:param query: The SQL query to execute.
:type query: str
:param options: Job parameters to be passed to the query command.
For more information, look at the
`gcloud beta dataflow sql query
<https://cloud.google.com/sdk/gcloud/reference/beta/dataflow/sql/query>`__
command reference
:param location: The location of the Dataflow job (for example europe-west1)
:type location: str
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param on_new_job_id_callback: Callback called when the job ID is known.
:type on_new_job_id_callback: callable
:return: the new job object
"""
cmd = [
"gcloud",
"dataflow",
"sql",
"query",
query,
f"--project={project_id}",
"--format=value(job.id)",
f"--job-name={job_name}",
f"--region={location}",
*(self._options_to_args(options)),
]
self.log.info("Executing command: %s", " ".join([shlex.quote(c) for c in cmd]))
with self.provide_authorized_gcloud():
proc = subprocess.run( # pylint: disable=subprocess-run-check
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
self.log.info("Output: %s", proc.stdout.decode())
self.log.warning("Stderr: %s", proc.stderr.decode())
self.log.info("Exit code %d", proc.returncode)
if proc.returncode != 0:
raise AirflowException(f"Process exit with non-zero exit code. Exit code: {proc.returncode}")
job_id = proc.stdout.decode().strip()
self.log.info("Created job ID: %s", job_id)
if on_new_job_id_callback:
on_new_job_id_callback(job_id)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
job_id=job_id,
location=location,
poll_sleep=self.poll_sleep,
num_retries=self.num_retries,
drain_pipeline=self.drain_pipeline,
)
jobs_controller.wait_for_done()
return jobs_controller.get_jobs(refresh=True)[0]
@GoogleBaseHook.fallback_to_default_project_id
def get_job(
self,
job_id: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> dict:
"""
Gets the job with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:return: the Job
:rtype: dict
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_by_id(job_id)
@GoogleBaseHook.fallback_to_default_project_id
def fetch_job_metrics_by_id(
self,
job_id: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> dict:
"""
Gets the job metrics with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:return: the JobMetrics. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/JobMetrics
:rtype: dict
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_metrics_by_id(job_id)
@GoogleBaseHook.fallback_to_default_project_id
def fetch_job_messages_by_id(
self,
job_id: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> List[dict]:
"""
Gets the job messages with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:return: the list of JobMessages. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#JobMessage
:rtype: List[dict]
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_messages_by_id(job_id)
@GoogleBaseHook.fallback_to_default_project_id
def fetch_job_autoscaling_events_by_id(
self,
job_id: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> List[dict]:
"""
Gets the job autoscaling events with the specified Job ID.
:param job_id: Job ID to get.
:type job_id: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:return: the list of AutoscalingEvents. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#autoscalingevent
:rtype: List[dict]
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_autoscaling_events_by_id(job_id)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataflow operators."""
import copy
import re
from contextlib import ExitStack
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.dataflow import DEFAULT_DATAFLOW_LOCATION
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
from airflow.version import version
# Airflow Dataflow flex workaround
import sys
import os
import_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, import_path)
import dataflow_hook as dataflow_hook
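# The gist's patched hook module (assumed to be saved as ``dataflow_hook.py`` alongside this
# operators file, hence the sys.path insertion above) is imported in place of
# ``airflow.providers.google.cloud.hooks.dataflow.DataflowHook`` so that the operators below pick
# up the flex-template workaround without modifying the installed provider package.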
class CheckJobRunning(Enum):
"""
Helper enum for choosing what to do if job is already running
IgnoreJob - do not check if running
FinishIfRunning - finish current dag run with no action
WaitForRun - wait for job to finish and then continue with new job
"""
IgnoreJob = 1
FinishIfRunning = 2
WaitForRun = 3
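# For example, ``DataflowCreateJavaJobOperator(..., check_if_running=CheckJobRunning.WaitForRun)``
# makes the operator keep calling ``is_job_dataflow_running`` until any previous run with the same
# job name prefix finishes before a new job is submitted (see ``execute`` below).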
# pylint: disable=too-many-instance-attributes
class DataflowCreateJavaJobOperator(BaseOperator):
"""
Start a Java Cloud DataFlow batch job. The parameters of the operation
will be passed to the job.
**Example**: ::
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2016, 8, 1),
'email': ['alex@vanboxel.be'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=30),
'dataflow_default_options': {
'project': 'my-gcp-project',
'zone': 'us-central1-f',
'stagingLocation': 'gs://bucket/tmp/dataflow/staging/',
}
}
dag = DAG('test-dag', default_args=default_args)
task = DataFlowJavaOperator(
gcp_conn_id='gcp_default',
task_id='normalize-cal',
jar='{{var.value.gcp_dataflow_base}}pipeline-ingress-cal-normalize-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY'
},
dag=dag)
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param jar: The reference to a self executing DataFlow jar (templated).
:type jar: str
:param job_name: The 'jobName' to use when executing the DataFlow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` in ``options`` will be overwritten.
:type job_name: str
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
:param options: Map of job specific options. The parameter must be a dictionary.
The values can be of different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped
* If the value is True, the single option - ``--key`` (without value) will be added.
* If the value is a list, the option will be repeated for each element.
If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key=B`` options
will be passed
* Other value types will be replaced with the Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:type options: dict
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:type poll_sleep: int
:param job_class: The name of the dataflow job class to be executed, it
is often not the main class configured in the dataflow jar file.
:type job_class: str
:param multiple_jobs: If pipeline creates multiple jobs then monitor all jobs
:type multiple_jobs: boolean
:param check_if_running: before running job, validate that a previous run is not in process
:type check_if_running: CheckJobRunning(IgnoreJob = do not check if running, FinishIfRunning=
if job is running finish with nothing, WaitForRun= wait until job finished and then run the new job)
``jar``, ``options``, and ``job_name`` are templated so you can use variables in them.
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:type cancel_timeout: Optional[int]
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
to work properly, i.e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stderr/stdout logs for the job id.
* loop waiting for the end of the job with the ID from the previous step.
This loop checks the status of the job.
Step two is started just after step one has finished, so if you call ``wait_until_finish`` in your
pipeline code, step two will not start until the process stops. When this process stops,
step two will run, but it will only execute one iteration as the job will already be in a terminal state.
If your pipeline code does not call the ``wait_until_finish`` method but you pass wait_until_finished=True
to the operator, the second loop will wait for the job's terminal state.
If your pipeline code does not call the ``wait_until_finish`` method and you pass wait_until_finished=False
to the operator, the second loop will check once whether the job is in a terminal state and exit the loop.
:type wait_until_finished: Optional[bool]
Note that both
``dataflow_default_options`` and ``options`` will be merged to specify pipeline
execution parameters, and ``dataflow_default_options`` is expected to hold
high-level options, for instance, project and zone information, which
apply to all dataflow operators in the DAG.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. code-block:: python
default_args = {
'dataflow_default_options': {
'zone': 'europe-west1-d',
'stagingLocation': 'gs://my-staging-bucket/staging/'
}
}
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar (see documentation here:
https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
Use ``options`` to pass on options to your job.
.. code-block:: python
t1 = DataFlowJavaOperator(
task_id='dataflow_example',
jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY',
'labels': {'foo' : 'bar'}
},
gcp_conn_id='airflow-conn-id',
dag=my_dag)
"""
template_fields = ["options", "jar", "job_name"]
ui_color = "#0273d4"
# pylint: disable=too-many-arguments
@apply_defaults
def __init__(
self,
*,
jar: str,
job_name: str = "{{task.task_id}}",
dataflow_default_options: Optional[dict] = None,
options: Optional[dict] = None,
project_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
job_class: Optional[str] = None,
check_if_running: CheckJobRunning = CheckJobRunning.WaitForRun,
multiple_jobs: Optional[bool] = None,
cancel_timeout: Optional[int] = 10 * 60,
wait_until_finished: Optional[bool] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
dataflow_default_options = dataflow_default_options or {}
options = options or {}
options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.jar = jar
self.multiple_jobs = multiple_jobs
self.job_name = job_name
self.dataflow_default_options = dataflow_default_options
self.options = options
self.poll_sleep = poll_sleep
self.job_class = job_class
self.check_if_running = check_if_running
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job_id = None
self.hook = None
def execute(self, context):
self.hook = dataflow_hook.DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
dataflow_options = copy.copy(self.dataflow_default_options)
dataflow_options.update(self.options)
is_running = False
if self.check_if_running != CheckJobRunning.IgnoreJob:
is_running = self.hook.is_job_dataflow_running( # type: ignore[attr-defined]
name=self.job_name,
variables=dataflow_options,
project_id=self.project_id,
location=self.location,
)
while is_running and self.check_if_running == CheckJobRunning.WaitForRun:
is_running = self.hook.is_job_dataflow_running( # type: ignore[attr-defined]
name=self.job_name,
variables=dataflow_options,
project_id=self.project_id,
location=self.location,
)
if not is_running:
with ExitStack() as exit_stack:
if self.jar.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
tmp_gcs_file = exit_stack.enter_context( # pylint: disable=no-member
gcs_hook.provide_file(object_url=self.jar)
)
self.jar = tmp_gcs_file.name
def set_current_job_id(job_id):
self.job_id = job_id
self.hook.start_java_dataflow( # type: ignore[attr-defined]
job_name=self.job_name,
variables=dataflow_options,
jar=self.jar,
job_class=self.job_class,
append_job_name=True,
multiple_jobs=self.multiple_jobs,
on_new_job_id_callback=set_current_job_id,
project_id=self.project_id,
location=self.location,
)
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
# pylint: disable=too-many-instance-attributes
class DataflowTemplatedJobStartOperator(BaseOperator):
"""
Start a Templated Cloud DataFlow job. The parameters of the operation
will be passed to the job.
:param template: The reference to the DataFlow template.
:type template: str
:param job_name: The 'jobName' to use when executing the DataFlow template
(templated).
:param options: Map of job runtime environment options.
It will update environment argument if passed.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:type options: dict
:param dataflow_default_options: Map of default job environment options.
:type dataflow_default_options: dict
:param parameters: Map of job specific parameters for the template.
:type parameters: dict
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:type poll_sleep: int
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:param environment: Optional, map of job runtime environment options.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:type environment: Optional[dict]
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:type cancel_timeout: Optional[int]
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
to work properly, i.e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stderr/stdout logs for the job id.
* loop waiting for the end of the job with the ID from the previous step.
This loop checks the status of the job.
Step two is started just after step one has finished, so if you call ``wait_until_finish`` in your
pipeline code, step two will not start until the process stops. When this process stops,
step two will run, but it will only execute one iteration as the job will already be in a terminal state.
If your pipeline code does not call the ``wait_until_finish`` method but you pass wait_until_finished=True
to the operator, the second loop will wait for the job's terminal state.
If your pipeline code does not call the ``wait_until_finish`` method and you pass wait_until_finished=False
to the operator, the second loop will check once whether the job is in a terminal state and exit the loop.
:type wait_until_finished: Optional[bool]
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. seealso::
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
.. code-block:: python
default_args = {
'dataflow_default_options': {
'zone': 'europe-west1-d',
'tempLocation': 'gs://my-staging-bucket/staging/',
}
}
You need to pass the path to your dataflow template as a file reference with the
``template`` parameter. Use ``parameters`` to pass on parameters to your job.
Use ``environment`` to pass on runtime environment variables to your job.
.. code-block:: python
t1 = DataflowTemplateOperator(
task_id='dataflow_example',
template='{{var.value.gcp_dataflow_base}}',
parameters={
'inputFile': "gs://bucket/input/my_input.txt",
'outputFile': "gs://bucket/output/my_output.txt"
},
gcp_conn_id='airflow-conn-id',
dag=my_dag)
``template``, ``dataflow_default_options``, ``parameters``, and ``job_name`` are
templated so you can use variables in them.
Note that ``dataflow_default_options`` is expected to save high-level options
for project information, which apply to all dataflow operators in the DAG.
.. seealso::
https://cloud.google.com/dataflow/docs/reference/rest/v1b3
/LaunchTemplateParameters
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
For more detail on job template execution have a look at the reference:
https://cloud.google.com/dataflow/docs/templates/executing-templates
"""
template_fields = [
"template",
"job_name",
"options",
"parameters",
"project_id",
"location",
"gcp_conn_id",
"impersonation_chain",
"environment",
]
ui_color = "#0273d4"
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
*,
template: str,
job_name: str = "{{task.task_id}}",
options: Optional[Dict[str, Any]] = None,
dataflow_default_options: Optional[Dict[str, Any]] = None,
parameters: Optional[Dict[str, str]] = None,
project_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
environment: Optional[Dict] = None,
cancel_timeout: Optional[int] = 10 * 60,
wait_until_finished: Optional[bool] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template = template
self.job_name = job_name
self.options = options or {}
self.dataflow_default_options = dataflow_default_options or {}
self.parameters = parameters or {}
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.poll_sleep = poll_sleep
self.job_id = None
self.hook: Optional[dataflow_hook.DataflowHook] = None
self.impersonation_chain = impersonation_chain
self.environment = environment
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
def execute(self, context) -> dict:
self.hook = dataflow_hook.DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
def set_current_job_id(job_id):
self.job_id = job_id
options = self.dataflow_default_options
options.update(self.options)
job = self.hook.start_template_dataflow(
job_name=self.job_name,
variables=options,
parameters=self.parameters,
dataflow_template=self.template,
on_new_job_id_callback=set_current_job_id,
project_id=self.project_id,
location=self.location,
environment=self.environment,
)
return job
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
class DataflowStartFlexTemplateOperator(BaseOperator):
"""
Starts a Dataflow Flex Template pipeline.
:param body: The request body. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.locations.flexTemplates/launch#request-body
:param location: The location of the Dataflow job (for example europe-west1)
:type location: str
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud
Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param drain_pipeline: Optional, set to True if you want to stop a streaming job by draining it
instead of canceling it when the task instance is killed. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:type drain_pipeline: bool
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:type cancel_timeout: Optional[int]
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call the ``PipelineResult.wait_until_finish`` method in your pipeline code for the
operator to work properly, i.e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stdout/stderr logs for the job id.
* a loop that waits for the job with the ID from the previous step to finish.
This loop checks the status of the job.
Step two starts just after step one has finished, so if you call wait_until_finish in your
pipeline code, step two will not start until the process stops. When the process stops, step two
will run, but it will only execute one iteration, as the job will already be in a terminal state.
If you do not call the wait_until_finish method in your pipeline but pass wait_until_finished=True
to the operator, the second loop will wait for the job's terminal state.
If you do not call the wait_until_finish method in your pipeline and pass wait_until_finished=False
to the operator, the second loop will check once whether the job is in a terminal state and exit.
:type wait_until_finished: Optional[bool]
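A minimal usage sketch (the Flex Template spec path, job name, and parameter names below are
illustrative placeholders, not values defined by this operator):
.. code-block:: python
    start_flex_template = DataflowStartFlexTemplateOperator(
        task_id="start_flex_template",
        location="us-central1",
        body={
            "launchParameter": {
                "jobName": "example-flex-job",
                "containerSpecGcsPath": "gs://my-bucket/templates/my-template.json",
                "parameters": {"inputFile": "gs://my-bucket/input/data.txt"},
            }
        },
        wait_until_finished=False,  # submit only; the status loop exits after one check
        dag=my_dag,
    )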
"""
template_fields = ["body", "location", "project_id", "gcp_conn_id"]
@apply_defaults
def __init__(
self,
body: Dict,
location: str,
project_id: Optional[str] = None,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
drain_pipeline: bool = False,
cancel_timeout: Optional[int] = 10 * 60,
wait_until_finished: Optional[bool] = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.body = body
self.location = location
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job_id = None
self.hook: Optional[dataflow_hook.DataflowHook] = None
def execute(self, context):
self.hook = dataflow_hook.DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
def set_current_job_id(job_id):
self.job_id = job_id
job = self.hook.start_flex_template(
body=self.body,
location=self.location,
project_id=self.project_id,
on_new_job_id_callback=set_current_job_id,
)
return job
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
class DataflowStartSqlJobOperator(BaseOperator):
"""
Starts a Dataflow SQL query job.
:param job_name: The unique name to assign to the Cloud Dataflow job.
:type job_name: str
:param query: The SQL query to execute.
:type query: str
:param options: Job parameters to be executed. It can be a dictionary with keys corresponding to
the flags of the
`gcloud beta dataflow sql query
<https://cloud.google.com/sdk/gcloud/reference/beta/dataflow/sql/query>`__
command.
:type options: dict
:param location: The location of the Dataflow job (for example europe-west1)
:type location: str
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud
Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param drain_pipeline: Optional, set to True if you want to stop a streaming job by draining it
instead of canceling it when the task instance is killed. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:type drain_pipeline: bool
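A minimal usage sketch (the query, option keys, and resource names below are illustrative
placeholders; the option keys are assumed to mirror the flags of ``gcloud beta dataflow sql query``):
.. code-block:: python
    start_sql_job = DataflowStartSqlJobOperator(
        task_id="start_sql_job",
        job_name="example-sql-job",
        query="""
            SELECT sales_region, COUNT(*) AS num_orders
            FROM bigquery.table.`my-project`.my_dataset.orders
            GROUP BY sales_region
        """,
        options={
            "bigquery-project": "my-project",
            "bigquery-dataset": "my_dataset",
            "bigquery-table": "orders_by_region",
        },
        location="us-central1",
        dag=my_dag,
    )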
"""
template_fields = [
"job_name",
"query",
"options",
"location",
"project_id",
"gcp_conn_id",
]
@apply_defaults
def __init__(
self,
job_name: str,
query: str,
options: Dict[str, Any],
location: str = DEFAULT_DATAFLOW_LOCATION,
project_id: Optional[str] = None,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
drain_pipeline: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.job_name = job_name
self.query = query
self.options = options
self.location = location
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.drain_pipeline = drain_pipeline
self.job_id = None
self.hook: Optional[dataflow_hook.DataflowHook] = None
def execute(self, context):
self.hook = dataflow_hook.DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
drain_pipeline=self.drain_pipeline,
)
def set_current_job_id(job_id):
self.job_id = job_id
job = self.hook.start_sql_job(
job_name=self.job_name,
query=self.query,
options=self.options,
location=self.location,
project_id=self.project_id,
on_new_job_id_callback=set_current_job_id,
)
return job
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
# pylint: disable=too-many-instance-attributes
class DataflowCreatePythonJobOperator(BaseOperator):
"""
Launches Cloud Dataflow jobs written in Python. Note that both
dataflow_default_options and options will be merged to specify pipeline
execution parameters, and dataflow_default_options is expected to hold
high-level options, such as project and zone information, which
apply to all Dataflow operators in the DAG.
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param py_file: Reference to the python dataflow pipeline file.py, e.g.,
/some/local/file/path/to/your/python/pipeline/file. (templated)
:type py_file: str
:param job_name: The 'job_name' to use when executing the DataFlow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` or ``'job_name'`` in ``options`` will be overwritten.
:type job_name: str
:param py_options: Additional python options, e.g., ["-m", "-v"].
:type py_options: list[str]
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
:param options: Map of job-specific options. The value must be a dictionary whose values
can be of different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped.
* If the value is True, the single option - ``--key`` (without value) will be added.
* If the value is a list, one option will be added for each element.
If the value is ``['A', 'B']`` and the key is ``key``, then the options ``--key=A --key=B``
will be passed.
* Other value types will be replaced with their Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:type options: dict
:param py_interpreter: Python version of the beam pipeline.
If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:type py_interpreter: str
:param py_requirements: Additional python package(s) to install.
If a value is passed to this parameter, a new virtual environment will be created with the
additional packages installed.
You can also install the apache-beam package if it is not installed on your system, or if you
want to use a different version.
:type py_requirements: List[str]
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:type py_system_site_packages: bool
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: Job location.
:type location: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:type poll_sleep: int
:param drain_pipeline: Optional, set to True if you want to stop a streaming job by draining it
instead of canceling it when the task instance is killed. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:type drain_pipeline: bool
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:type cancel_timeout: Optional[int]
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call the ``PipelineResult.wait_until_finish`` method in your pipeline code for the
operator to work properly, i.e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stdout/stderr logs for the job id.
* a loop that waits for the job with the ID from the previous step to finish.
This loop checks the status of the job.
Step two starts just after step one has finished, so if you call wait_until_finish in your
pipeline code, step two will not start until the process stops. When the process stops, step two
will run, but it will only execute one iteration, as the job will already be in a terminal state.
If you do not call the wait_until_finish method in your pipeline but pass wait_until_finished=True
to the operator, the second loop will wait for the job's terminal state.
If you do not call the wait_until_finish method in your pipeline and pass wait_until_finished=False
to the operator, the second loop will check once whether the job is in a terminal state and exit.
:type wait_until_finished: Optional[bool]
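A minimal usage sketch (the GCS paths, pipeline options, and Beam version below are illustrative
placeholders):
.. code-block:: python
    start_python_job = DataflowCreatePythonJobOperator(
        task_id="start_python_job",
        py_file="gs://my-bucket/pipelines/wordcount.py",
        py_interpreter="python3",
        py_requirements=["apache-beam[gcp]==2.25.0"],
        py_options=[],
        options={
            "tempLocation": "gs://my-bucket/tmp/",
            "output": "gs://my-bucket/output/results",
        },
        location="us-central1",
        dag=my_dag,
    )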
"""
template_fields = ["options", "dataflow_default_options", "job_name", "py_file"]
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
*,
py_file: str,
job_name: str = "{{task.task_id}}",
dataflow_default_options: Optional[dict] = None,
options: Optional[dict] = None,
py_interpreter: str = "python3",
py_options: Optional[List[str]] = None,
py_requirements: Optional[List[str]] = None,
py_system_site_packages: bool = False,
project_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
drain_pipeline: bool = False,
cancel_timeout: Optional[int] = 10 * 60,
wait_until_finished: Optional[bool] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.py_file = py_file
self.job_name = job_name
self.py_options = py_options or []
self.dataflow_default_options = dataflow_default_options or {}
self.options = options or {}
self.options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
self.py_interpreter = py_interpreter
self.py_requirements = py_requirements
self.py_system_site_packages = py_system_site_packages
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.poll_sleep = poll_sleep
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job_id = None
self.hook: Optional[dataflow_hook.DataflowHook] = None
def execute(self, context):
"""Execute the python dataflow job."""
with ExitStack() as exit_stack:
if self.py_file.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
tmp_gcs_file = exit_stack.enter_context( # pylint: disable=no-member
gcs_hook.provide_file(object_url=self.py_file)
)
self.py_file = tmp_gcs_file.name
self.hook = dataflow_hook.DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
dataflow_options = self.dataflow_default_options.copy()
dataflow_options.update(self.options)
# Convert argument names from lowerCamelCase to snake case.
camel_to_snake = lambda name: re.sub(r"[A-Z]", lambda x: "_" + x.group(0).lower(), name)
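# For example, camel_to_snake("tempLocation") returns "temp_location".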
formatted_options = {camel_to_snake(key): dataflow_options[key] for key in dataflow_options}
def set_current_job_id(job_id):
self.job_id = job_id
self.hook.start_python_dataflow( # type: ignore[attr-defined]
job_name=self.job_name,
variables=formatted_options,
dataflow=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
on_new_job_id_callback=set_current_job_id,
project_id=self.project_id,
location=self.location,
)
return {"job_id": self.job_id}
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id)
# Usage: in your DAG file, import this module and launch the flex template through it, e.g.:
#
#   import dataflow_operator as dataflow_operator
#   ...
#   on_prem_writeback_operator = dataflow_operator.DataflowStartFlexTemplateOperator(
#       ...
#   )
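# A more complete sketch of this wiring, assuming the file above is saved as dataflow_operator.py
# next to the DAG (the DAG id, schedule, GCS paths, and template parameters below are illustrative
# placeholders, not values taken from this gist):
#
#     from airflow import DAG
#     from airflow.utils.dates import days_ago
#
#     import dataflow_operator
#
#     with DAG("flex_template_example", start_date=days_ago(1), schedule_interval=None) as dag:
#         launch_flex_template = dataflow_operator.DataflowStartFlexTemplateOperator(
#             task_id="launch_flex_template",
#             location="us-central1",
#             body={
#                 "launchParameter": {
#                     "jobName": "example-flex-job",
#                     "containerSpecGcsPath": "gs://my-bucket/templates/my-template.json",
#                     "parameters": {
#                         "inputSubscription": "projects/my-project/subscriptions/my-sub",
#                         "outputTable": "my-project:my_dataset.my_table",
#                     },
#                 }
#             },
#         )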