data_lake_to_mart.py (forked from GoogleCloudPlatform/professional-services)
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" data_lake_to_mart.py demonstrates a Dataflow pipeline which reads a
large BigQuery Table, joins in another dataset, and writes its contents to a
BigQuery table.
"""
from __future__ import absolute_import
import argparse
import logging
import os
import traceback
import apache_beam as beam
from apache_beam.io.gcp.bigquery import parse_table_schema_from_json
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pvalue import AsDict
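
# A minimal example invocation (illustrative only; the project, bucket, and
# runner values are placeholders, not defined by this repository):
#
#   python data_lake_to_mart.py \
#       --project=<your-project-id> \
#       --runner=DataflowRunner \
#       --staging_location=gs://<your-bucket>/staging \
#       --temp_location=gs://<your-bucket>/temp \
#       --output=lake.orders_denormalized_sideinput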


class DataLakeToDataMart:
    """A helper class which contains the logic to translate the file into
    a format BigQuery will accept.

    This example uses side inputs to join two datasets together.
    """

    def __init__(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.schema_str = ''
        # This is the schema of the destination table in BigQuery.
        schema_file = os.path.join(dir_path, 'resources',
                                   'orders_denormalized.json')
        with open(schema_file) as f:
            data = f.read()
            # Wrapping the schema in fields is required for the BigQuery API.
            self.schema_str = '{"fields": ' + data + '}'
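            # After wrapping, self.schema_str has roughly this shape (the field
            # shown is illustrative; the real fields come from
            # resources/orders_denormalized.json):
            #   {"fields": [{"name": "acct_number", "type": "STRING",
            #                "mode": "NULLABLE"}, ...]}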

    def get_orders_query(self):
        """This returns a query against a very large fact table.  We are
        using a fake orders dataset to simulate a fact table in a typical
        data warehouse."""
        orders_query = """SELECT
acct_number,
col_number,
col_number_1,
col_number_10,
col_number_100,
col_number_101,
col_number_102,
col_number_103,
col_number_104,
col_number_105,
col_number_106,
col_number_107,
col_number_108,
col_number_109,
col_number_11,
col_number_110,
col_number_111,
col_number_112,
col_number_113,
col_number_114,
col_number_115,
col_number_116,
col_number_117,
col_number_118,
col_number_119,
col_number_12,
col_number_120,
col_number_121,
col_number_122,
col_number_123,
col_number_124,
col_number_125,
col_number_126,
col_number_127,
col_number_128,
col_number_129,
col_number_13,
col_number_130,
col_number_131,
col_number_132,
col_number_133,
col_number_134,
col_number_135,
col_number_136,
col_number_14,
col_number_15,
col_number_16,
col_number_17,
col_number_18,
col_number_19,
col_number_2,
col_number_20,
col_number_21,
col_number_22,
col_number_23,
col_number_24,
col_number_25,
col_number_26,
col_number_27,
col_number_28,
col_number_29,
col_number_3,
col_number_30,
col_number_31,
col_number_32,
col_number_33,
col_number_34,
col_number_35,
col_number_36,
col_number_37,
col_number_38,
col_number_39,
col_number_4,
col_number_40,
col_number_41,
col_number_42,
col_number_43,
col_number_44,
col_number_45,
col_number_46,
col_number_47,
col_number_48,
col_number_49,
col_number_5,
col_number_50,
col_number_51,
col_number_52,
col_number_53,
col_number_54,
col_number_55,
col_number_56,
col_number_57,
col_number_58,
col_number_59,
col_number_6,
col_number_60,
col_number_61,
col_number_62,
col_number_63,
col_number_64,
col_number_65,
col_number_66,
col_number_67,
col_number_68,
col_number_69,
col_number_7,
col_number_70,
col_number_71,
col_number_72,
col_number_73,
col_number_74,
col_number_75,
col_number_76,
col_number_77,
col_number_78,
col_number_79,
col_number_8,
col_number_80,
col_number_81,
col_number_82,
col_number_83,
col_number_84,
col_number_85,
col_number_86,
col_number_87,
col_number_88,
col_number_89,
col_number_9,
col_number_90,
col_number_91,
col_number_92,
col_number_93,
col_number_94,
col_number_95,
col_number_96,
col_number_97,
col_number_98,
col_number_99,
col_number_num1,
date,
foo,
num1,
num2,
num3,
num5,
num6,
product_number,
quantity
FROM
`python-dataflow-example.example_data.orders` orders
LIMIT
10
"""
return orders_query
def add_account_details(self, row, account_details):
"""add_account_details joins two datasets together. Dataflow passes in the
a row from the orders dataset along with the entire account details dataset.
This works because the entire account details dataset can be passed in memory.
The function then looks up the account details, and adds all columns to a result
dictionary, which will be written to BigQuery."""
result = row.copy()
try:
result.update(account_details[row['acct_number']])
except KeyError as err:
traceback.print_exc()
logging.error("Account Not Found error: %s", err)
return result
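
    # Illustrative behaviour of add_account_details (all values here are
    # hypothetical, not taken from the real datasets):
    #   row             = {'acct_number': 'A100', 'quantity': 2}
    #   account_details = {'A100': {'acct_name': 'Acme Corp', 'city': 'NYC'}}
    #   result          = {'acct_number': 'A100', 'quantity': 2,
    #                      'acct_name': 'Acme Corp', 'city': 'NYC'}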


def run(argv=None):
    """The main function which creates the pipeline and runs it."""
    parser = argparse.ArgumentParser()
    # Here we add the specific command line arguments we expect.
    # This defaults the output table in BigQuery; you'll have to create the
    # destination dataset yourself, e.g. `bq mk lake` for the default value.
    parser.add_argument('--output', dest='output', required=False,
                        help='Output BQ table to write results to.',
                        default='lake.orders_denormalized_sideinput')
    # Parse arguments from the command line.
    known_args, pipeline_args = parser.parse_known_args(argv)
    # DataLakeToDataMart is a class we built in this script to hold the logic
    # for transforming the file into a BigQuery table.
    data_lake_to_data_mart = DataLakeToDataMart()
    p = beam.Pipeline(options=PipelineOptions(pipeline_args))
    schema = parse_table_schema_from_json(data_lake_to_data_mart.schema_str)

    # This query returns details about the account, normalized into a
    # different table.  We will join this data into the main orders dataset
    # in order to create a denormalized table.
    account_details_source = (
        p
        | 'Read Account Details from BigQuery ' >> beam.io.Read(
            beam.io.BigQuerySource(query="""
                SELECT
                  acct_number,
                  acct_company_name,
                  acct_group_name,
                  acct_name,
                  acct_org_name,
                  address,
                  city,
                  state,
                  zip_code,
                  country
                FROM
                  `python-dataflow-example.example_data.account`""",
                use_standard_sql=True))
        # This next stage of the pipeline maps the acct_number to a single row
        # of results from BigQuery.  Mapping this way helps Dataflow move your
        # data around to different workers.  When later stages of the pipeline
        # run, all results for a given account number will be on one worker.
        | 'Account Details' >> beam.Map(
            lambda row: (row['acct_number'], row)))
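
    # Illustrative element produced by the 'Account Details' step above
    # (values are hypothetical, not taken from the real dataset):
    #   ('A100', {'acct_number': 'A100', 'acct_name': 'Acme Corp',
    #             'city': 'New York', ...})
    # AsDict below turns this keyed PCollection into a single Python dict,
    # keyed by acct_number, so it can be passed as a side input.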
    orders_query = data_lake_to_data_mart.get_orders_query()
    (p
     # Read the orders from BigQuery.  This is the source of the pipeline.  All
     # further processing starts with rows read from the query results here.
     | 'Read Orders from BigQuery ' >> beam.io.Read(
         beam.io.BigQuerySource(query=orders_query, use_standard_sql=True))
     # Here we pass in a side input, which is data that comes from outside our
     # main source.  The side input contains a map of account numbers to the
     # full account details for each account.
     | 'Join Data with sideInput' >> beam.Map(
         data_lake_to_data_mart.add_account_details,
         AsDict(account_details_source))
     # This is the final stage of the pipeline, where we define the
     # destination of the data.  In this case we are writing to BigQuery.
     | 'Write Data to BigQuery' >> beam.io.Write(
         beam.io.BigQuerySink(
             # The table name is a required argument for the BigQuery sink.
             # In this case we use the value passed in from the command line.
             known_args.output,
             # Here we use the JSON schema read in from a JSON file.
             # Specifying the schema allows the API to create the table
             # correctly if it does not yet exist.
             schema=schema,
             # Creates the table in BigQuery if it does not yet exist.
             create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
             # Deletes all data in the BigQuery table before writing.
             write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)))
    p.run().wait_until_finish()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()