|
| 1 | +#!/usr/bin/env python |
| 2 | +# coding: utf-8 |
| 3 | + |
| 4 | + |
import logging
# Configure the root logger so module and Prefect task logs are emitted.
logger = logging.getLogger()
logger.setLevel(logging.INFO) #DEBUG, INFO, WARNING, ERROR, CRITICAL
| 8 | + |
| 9 | + |
| 10 | + |
| 11 | +import json, os, pandas as pd, pendulum, sys |
| 12 | +from ProjectDomino.Neo4jDataAccess import Neo4jDataAccess |
| 13 | +from ProjectDomino.FirehoseJob import FirehoseJob |
| 14 | +from ProjectDomino.TwintPool import TwintPool |
| 15 | +from prefect.environments.storage import S3 |
| 16 | +from prefect import context, Flow, task |
| 17 | +from prefect.schedules import IntervalSchedule |
| 18 | +from datetime import timedelta, datetime |
| 19 | +from prefect.engine.executors import DaskExecutor |
| 20 | + |
| 21 | + |
# Bucket used for Prefect S3 flow storage.
S3_BUCKET = "wzy-project-domino"

# Widen pandas display limits so logged DataFrames are not truncated.
for _opt, _val in (
    ('display.max_colwidth', None),
    ('display.max_rows', 500),
    ('display.max_columns', 500),
    ('display.width', 1000),
):
    pd.set_option(_opt, _val)
| 28 | + |
def env_non_empty(x: str) -> bool:
    """Return True iff environment variable *x* is set to a non-empty string.

    The original returned either ``False`` or the raw string value; callers
    only ever use the result in a boolean context, so this normalizes the
    return type to ``bool`` (backward compatible in truthiness).
    """
    return bool(os.environ.get(x))
| 31 | + |
def str_to_bool(x: str) -> bool:
    """Parse a boolean-ish environment-variable string into a bool.

    Generalizes the original fixed lists ('True'/'true'/'TRUE'/'1', etc.) to
    any capitalization and tolerates surrounding whitespace, while still
    accepting every previously-valid spelling.

    Raises:
        ValueError: if *x* is not a recognized boolean spelling.
    """
    normalized = x.strip().lower()
    if normalized in ('true', '1'):
        return True
    if normalized in ('false', '0'):
        return False
    raise ValueError('Cannot convert to bool: ' + x)
| 39 | + |
# --- Runtime configuration pulled from environment variables ---
# Seconds between scheduled runs of the stream task.
stride_sec = int(os.environ['DOMINO_STRIDE_SEC']) if env_non_empty('DOMINO_STRIDE_SEC') else 30
# Label used for output directories and the Prefect flow name.
job_name = os.environ['DOMINO_JOB_NAME'] if env_non_empty('DOMINO_JOB_NAME') else "covid"
# Passed to FirehoseJob as write_to_disk (e.g. 'parquet_s3'); None disables writing.
write_format = os.environ['DOMINO_WRITE_FORMAT'] if env_non_empty('DOMINO_WRITE_FORMAT') else None
# Whether get_timelines should also fetch user profiles.
fetch_profiles = str_to_bool(os.environ['DOMINO_FETCH_PROFILES']) if env_non_empty('DOMINO_FETCH_PROFILES') else False
# Required: comma-delimited list of usernames to stream.
usernames_raw = os.environ['DOMINO_USERNAMES'] if env_non_empty('DOMINO_USERNAMES') else None
if usernames_raw is None:
    raise ValueError('DOMINO_USERNAMES is not set, expected comma-delimited str')
usernames = usernames_raw.split(',')
usernames = [ x for x in usernames if len(x) > 0 ]  # drop empties from stray commas

# S3 credentials/options are only read when parquet output goes to S3.
if write_format == 'parquet_s3':
    # NOTE(review): s3_filepath may still be None here — confirm FirehoseJob
    # tolerates a missing DOMINO_S3_FILEPATH in parquet_s3 mode.
    s3_filepath = os.environ['DOMINO_S3_FILEPATH'] if env_non_empty('DOMINO_S3_FILEPATH') else None
    AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
    AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
    compression = os.environ['DOMINO_COMPRESSION'] if env_non_empty('DOMINO_COMPRESSION') else 'snappy'

# Local scratch directories for per-job outputs.
output_path = f'/output/{job_name}'
os.makedirs(f'{output_path}/tweets', exist_ok=True)
os.makedirs(f'{output_path}/profiles', exist_ok=True)
os.makedirs(f'{output_path}/timelines', exist_ok=True)


# FIXME unsafe when distributed
# In-process work queue and in-flight counter shared across scheduled runs of
# run_stream; only valid while all task runs execute in this one process.
usernames_queue = usernames.copy()
pending = 0
| 65 | + |
@task(log_stdout=True, skip_on_upstream_skip=True, max_retries=3, retry_delay=timedelta(seconds=30))
def run_stream():
    """Pop one username off the shared in-process queue and stream its timeline.

    Runs once per schedule tick. When the queue is empty and no run is still
    in flight, exits the whole process (sys.exit(0)) to stop the flow. On any
    failure the username is pushed back to the head of the queue so a later
    tick retries it.

    Fixes vs. original: print used %-style args positionally (printed the
    literal '%s'); bare ``except:`` caught SystemExit/KeyboardInterrupt;
    ``pending`` is now decremented in ``finally`` so the in-flight counter
    stays accurate even if the error handler itself raises.
    """
    # FIXME unsafe when distributed: queue and counter live in this process only.
    global pending

    if not usernames_queue and pending == 0:
        logger.info(f'Successfully processed all usernames ({len(usernames)}), exiting')
        sys.exit(0)

    if not usernames_queue:
        logger.info(f'No more usernames to process, but {pending} jobs are still pending')
        return

    pending += 1
    username = usernames_queue.pop(0)

    try:
        tp = TwintPool(is_tor=True)
        fh = FirehoseJob(
            PARQUET_SAMPLE_RATE_TIME_S=30,
            save_to_neo=False,
            tp=tp,
            writers={},
            write_to_disk=write_format,
            # S3 writer options only apply in parquet_s3 mode.
            write_opts=(
                {
                    's3_filepath': s3_filepath,
                    's3fs_options': {
                        'key': AWS_ACCESS_KEY_ID,
                        'secret': AWS_SECRET_ACCESS_KEY
                    },
                    'compression': compression
                }
                if write_format == 'parquet_s3' else
                {}
            )
        )

        for df in fh.get_timelines(
            usernames=[username],
            job_name=job_name,
            fetch_profiles=fetch_profiles
        ):
            # log_stdout=True routes this print into Prefect's task logs
            print('got: %s' % (df.shape if df is not None else 'None'))
    except Exception:
        # Put the username back at the head of the queue so it is retried
        # on a later tick; never swallow SystemExit/KeyboardInterrupt.
        logger.error("task exception, reinserting user", exc_info=True)
        usernames_queue.insert(0, username)
    finally:
        pending -= 1
        print("task finished")
| 120 | + |
| 121 | + |
# Run the stream task on a fixed interval; the far-past start date means the
# first run fires immediately.
schedule_opts = {
    'interval': timedelta(seconds=stride_sec),
    'start_date': pendulum.parse('2019-01-01 00:00:00')
}
logger.info(f'Schedule options: {schedule_opts}')
logger.info(f'Task settings: stride_sec={stride_sec}')

schedule = IntervalSchedule(**schedule_opts)
# NOTE(review): storage is constructed but not passed to Flow below (see the
# commented-out variants) — confirm whether S3 flow storage is still intended.
storage = S3(bucket=S3_BUCKET)

#with Flow("covid-19 stream-single") as flow:
#with Flow("covid-19 stream", storage=storage, schedule=schedule) as flow:
with Flow(f"{job_name} stream", schedule=schedule) as flow:
    run_stream()
# Blocks here: the schedule keeps firing run_stream until it calls sys.exit(0).
flow.run()
| 137 | + |
0 commit comments