#!/usr/bin/env python
# coding: utf-8


import logging

logger = logging.getLogger()
logger.setLevel(logging.INFO)  # one of: DEBUG, INFO, WARNING, ERROR, CRITICAL


import json
import os
from datetime import timedelta, datetime

import pandas as pd
import pendulum
from prefect import context, Flow, task
from prefect.engine.executors import DaskExecutor
from prefect.environments.storage import S3
from prefect.schedules import IntervalSchedule

from ProjectDomino.FirehoseJob import FirehoseJob
from ProjectDomino.Neo4jDataAccess import Neo4jDataAccess
from ProjectDomino.TwintPool import TwintPool


S3_BUCKET = "wzy-project-domino"

pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

def env_non_empty(x: str) -> bool:
    """True when environment variable x is set to a non-empty value."""
    return x in os.environ and bool(os.environ[x])

stride_sec = int(os.environ['DOMINO_STRIDE_SEC']) if env_non_empty('DOMINO_STRIDE_SEC') else 30
historic_stride_sec = int(os.environ['DOMINO_HISTORIC_STRIDE_SEC']) if env_non_empty('DOMINO_HISTORIC_STRIDE_SEC') else 60 * 60 * 24
twint_stride_sec = int(os.environ['DOMINO_TWINT_STRIDE_SEC']) if env_non_empty('DOMINO_TWINT_STRIDE_SEC') else round(historic_stride_sec / 2)
delay_sec = int(os.environ['DOMINO_DELAY_SEC']) if env_non_empty('DOMINO_DELAY_SEC') else 60
job_name = os.environ['DOMINO_JOB_NAME'] if env_non_empty('DOMINO_JOB_NAME') else "covid"
start_date = pendulum.parse(os.environ['DOMINO_START_DATE']) if env_non_empty('DOMINO_START_DATE') else datetime.now() - timedelta(days=365)
search = os.environ['DOMINO_SEARCH'] if env_non_empty('DOMINO_SEARCH') else "covid OR corona OR virus OR pandemic"
write_format = os.environ['DOMINO_WRITE_FORMAT'] if env_non_empty('DOMINO_WRITE_FORMAT') else None

if write_format == 'parquet_s3':
    s3_filepath = os.environ['DOMINO_S3_FILEPATH'] if env_non_empty('DOMINO_S3_FILEPATH') else None
    AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']  # required in this mode; raises KeyError if unset
    AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
    compression = os.environ['DOMINO_COMPRESSION'] if env_non_empty('DOMINO_COMPRESSION') else 'snappy'

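# A minimal sketch of how the job is typically configured (values below are
# hypothetical examples for illustration, not defaults shipped with the repo):
#
#   export DOMINO_JOB_NAME=covid
#   export DOMINO_START_DATE=2020-01-01
#   export DOMINO_HISTORIC_STRIDE_SEC=86400
#   export DOMINO_WRITE_FORMAT=parquet_s3
#   export DOMINO_S3_FILEPATH=s3://my-bucket/covid/   # hypothetical bucket/path
#   export AWS_ACCESS_KEY_ID=...                      # keep out of source control
#   export AWS_SECRET_ACCESS_KEY=...
#   python <this_script>.py
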
output_path = f'/output/{job_name}'
os.makedirs(output_path, exist_ok=True)


# FIXME unsafe when distributed: each worker process mutates its own copy of task_num
task_num = -1
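# One distribution-safe alternative (an untested sketch, assuming the Prefect
# 0.x context these imports target): derive the window index from the
# scheduled run time instead of mutating module state, e.g.
#   scheduled = context.get('scheduled_start_time')
#   task_num = int((scheduled - start_date).total_seconds() // stride_sec)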

@task(log_stdout=True, skip_on_upstream_skip=True, max_retries=3, retry_delay=timedelta(seconds=30))
def run_stream():

    global task_num
    task_num += 1
    # Each run advances one historic stride past the configured start date
    start = start_date + timedelta(seconds=task_num * historic_stride_sec)
    current = start + timedelta(seconds=historic_stride_sec)
    print('------------------------')
    print('task %s with start %s: %s to %s' % (task_num, start_date, start, current))
    # Fixed windows kept from debugging sessions, e.g.:
    #   start = datetime.strptime("2020-10-06 22:10:00", "%Y-%m-%d %H:%M:%S")
    #   current = datetime.strptime("2020-10-10 16:08:00", "%Y-%m-%d %H:%M:%S")
    #   current = datetime.now().replace(microsecond=0)
    tp = TwintPool(is_tor=True)  # rotate scrape traffic through Tor
    fh = FirehoseJob(
        PARQUET_SAMPLE_RATE_TIME_S=30,
        save_to_neo=False,
        writers={},
        write_to_disk=write_format,
        write_opts=(
            {
                's3_filepath': s3_filepath,
                's3fs_options': {
                    'key': AWS_ACCESS_KEY_ID,
                    'secret': AWS_SECRET_ACCESS_KEY
                },
                'compression': compression
            }
            if write_format == 'parquet_s3' else
            {}
        )
    )

    try:
        for df in fh.search_time_range(
            tp=tp,
            Search=search,
            Since=datetime.strftime(start, "%Y-%m-%d %H:%M:%S"),
            Until=datetime.strftime(current, "%Y-%m-%d %H:%M:%S"),
            job_name=job_name,
            Limit=10000000,
            stride_sec=twint_stride_sec
        ):
            print('got: %s' % (df.shape if df is not None else 'None'))
    except Exception:
        logger.error("job exception", exc_info=True)
        raise
    print("task finished")


schedule_opts = {
    'interval': timedelta(seconds=stride_sec),
    'start_date': pendulum.parse('2019-01-01 00:00:00')
}
logger.info(f'Schedule options: {schedule_opts}')
logger.info(
    f'Task settings: stride_sec={stride_sec}, '
    f'historic_stride_sec={historic_stride_sec}, '
    f'twint_stride_sec={twint_stride_sec}, '
    f'start_date={start_date}, '
    f'search={search}'
)

schedule = IntervalSchedule(**schedule_opts)
storage = S3(bucket=S3_BUCKET)  # only used by the S3-registered variant below

#with Flow("covid-19 stream-single") as flow:
#with Flow("covid-19 stream", storage=storage, schedule=schedule) as flow:
with Flow(f"{job_name} stream", schedule=schedule) as flow:
    run_stream()
flow.run()
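
# DaskExecutor is imported but unused; under the Prefect 0.x API these imports
# target, the run could be handed to Dask instead (addresses are hypothetical):
#   flow.run(executor=DaskExecutor())                        # local in-process cluster
#   flow.run(executor=DaskExecutor('tcp://scheduler:8786'))  # existing cluster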