tutorial4.py
# -*- coding: utf-8 -*-
# Read data from BigQuery,
# transform it,
# and write it back to BigQuery.
# +----------------+
# |                |
# | Read BigQuery  |
# |                |
# +-------+--------+
#         |
#         v
# +-------+--------+
# |                |
# | Modify Element |
# |                |
# +-------+--------+
#         |
#         v
# +-------+--------+
# |                |
# | Write BigQuery |
# |                |
# +----------------+
import apache_beam as beam
# Basic Dataflow settings.
# Specify the job name, the GCP project, and where staging/temporary files are stored.
options = beam.options.pipeline_options.PipelineOptions()
gcloud_options = options.view_as(
    beam.options.pipeline_options.GoogleCloudOptions)
gcloud_options.job_name = 'dataflow-tutorial4'
gcloud_options.project = 'PROJECTID'
gcloud_options.staging_location = 'gs://PROJECTID/staging'
gcloud_options.temp_location = 'gs://PROJECTID/temp'
# Dataflow scaling settings.
# Configure the maximum number of workers, the machine type, and so on.
# The default worker disk size is large (250 GB for batch, 420 GB for streaming),
# so it is recommended to specify only the size you actually need here.
worker_options = options.view_as(beam.options.pipeline_options.WorkerOptions)
worker_options.disk_size_gb = 20
worker_options.max_num_workers = 2
# worker_options.num_workers = 2
# worker_options.machine_type = 'n1-standard-8'
# Switching the execution environment.
# DirectRunner: runs on your local machine.
# DataflowRunner: runs on Cloud Dataflow.
# options.view_as(beam.options.pipeline_options.StandardOptions).runner = 'DirectRunner'
options.view_as(beam.options.pipeline_options.StandardOptions).runner = 'DataflowRunner'
def modify_data1(element):
    # beam.Map is used when each input element produces exactly one output element.
    # element = {u'corpus_date': 0, u'corpus': u'sonnets', u'word': u'LVII', u'word_count': 1}
    corpus_upper = element['corpus'].upper()
    word_len = len(element['word'])
    return {'corpus_upper': corpus_upper,
            'word_len': word_len}
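# For example, the sample element shown above would be mapped to
# {'corpus_upper': 'SONNETS', 'word_len': 4}.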
p4 = beam.Pipeline(options=options)
query = 'SELECT * FROM [bigquery-public-data:samples.shakespeare] LIMIT 10'
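# For reference, an assumed standard-SQL equivalent of the same query is shown
# below (not used here; it would also require use_standard_sql=True in the
# BigQuerySource call):
# query = 'SELECT * FROM `bigquery-public-data.samples.shakespeare` LIMIT 10'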
(p4 | 'read' >> beam.io.Read(beam.io.BigQuerySource(project='PROJECTID', use_standard_sql=False, query=query))
    | 'modify' >> beam.Map(modify_data1)
    | 'write' >> beam.io.Write(beam.io.BigQuerySink(
        'testdataset.testtable2',
        schema='corpus_upper:STRING, word_len:INTEGER',
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE))
 )
p4.run() # .wait_until_finish()
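# A minimal sketch of running the pipeline and blocking until the job finishes
# (an assumed variant of the line above; wait_until_finish() is called on the
# PipelineResult returned by run()):
# result = p4.run()
# result.wait_until_finish()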