|
12 | 12 | RELEVANCE = "RELEVANCE" |
13 | 13 | ESS = "EVENT_SENTIMENT_SCORE" |
14 | 14 |
|
15 | | -# In EDGE, it's not allowed to have a lookback lower than 2 days |
16 | | -# But in RPA it's possible to have a lookback of 1 day: |
17 | | -if PRODUCT == "edge": |
18 | | - fields = [ |
19 | | - {"avg_2d": {"avg": {"field": ESS, "mode": "granular"}}}, |
20 | | - {"avg_7d": {"avg": {"field": "avg_2d", "lookback": 7, "mode": "granular"}}}, |
21 | | - { |
22 | | - "buzz_30d": { |
23 | | - "buzz": {"field": "RP_ENTITY_ID", "lookback": 30, "mode": "granular"} |
24 | | - } |
25 | | - }, |
26 | | - {"newsvolume_2d": {"count": {"field": "RP_ENTITY_ID", "lookback": 2}}}, |
27 | | - ] |
28 | | -else: |
29 | | - fields = [ |
30 | | - {"avg_1d": {"avg": {"field": ESS, "lookback": 1, "mode": "granular"}}}, |
31 | | - {"avg_7d": {"avg": {"field": "avg_1d", "lookback": 7, "mode": "granular"}}}, |
32 | | - {"buzz_365d": {"buzz": {"field": "RP_ENTITY_ID", "lookback": 365}}}, |
33 | | - {"newsvolume_1d": {"count": {"field": "RP_ENTITY_ID", "lookback": 1}}}, |
34 | | - { |
35 | | - "newsvolume_365d": { |
36 | | - "avg": {"field": "newsvolume_1d", "lookback": 365, "mode": "granular"} |
37 | | - } |
38 | | - }, |
39 | | - ] |
40 | | - |
41 | | - |
42 | | -# Begin creating a dataset with your desired filters (see the RPA user guide for syntax) |
| 15 | +# Begin creating a dataset with your desired filters (see the RPA/EDGE user guide for syntax) |
43 | 16 | # You can then add functions (https://app.ravenpack.com/api-documentation/#indicator-syntax) |
44 | 17 | # Alternatively, you can create the dataset via the query builder and just use the dataset_uuid
45 | 18 | print("Creating a dataset with a few functions...") |
|
50 | 23 | name="My Indicator dataset", |
51 | 24 | filters={"$and": [{"entity_type": {"$in": ["COMP"]}}, {RELEVANCE: {"$gte": 90}}]}, |
52 | 25 | frequency="daily", |
53 | | - fields=fields, |
| 26 | + custom_fields=[ |
| 27 | + {"avg_d": {"avg": {"field": ESS, "mode": "daily"}}},  # daily ESS average
| 28 | + {"avg_7d": {"avg": {"field": "avg_d", "lookback": 7, "mode": "granular"}}},  # 7-day average of avg_d
| 29 | + { |
| 30 | + "buzz_30d": {  # buzz indicator over a 30-day lookback
| 31 | + "buzz": {"field": "RP_ENTITY_ID", "lookback": 30, "mode": "granular"} |
| 32 | + } |
| 33 | + }, |
| 34 | + {"newsvolume_d": {"count": {"field": "RP_ENTITY_ID", "mode": "daily"}}},  # daily record count
| 35 | + { |
| 36 | + "newsvolume_30d": {  # 30-day average of newsvolume_d
| 37 | + "avg": {"field": "newsvolume_d", "lookback": 30, "mode": "granular"} |
| 38 | + } |
| 39 | + }, |
| 40 | + ], |
54 | 41 | ) |
55 | 42 | dataset.save() |
56 | 43 |
|
57 | | -# you can also change the fields, (remember to save afterward) |
58 | | -print("Updating fields...") |
59 | | -dataset.fields = [ |
| 44 | +job = dataset.request_datafile( |
| 45 | + start_date="2018-04-10", end_date="2018-04-11", output_format="csv" |
| 46 | +) |
| 47 | +job.save_to_file("output.csv") # This will poll until the file is ready for download |
| 48 | + |
| 49 | +# You can also change the custom_fields (remember to save afterward)
| 50 | +print("Updating custom_fields...") |
| 51 | +dataset.custom_fields = [ |
60 | 52 | {"avg": {"avg": {"field": ESS, "lookback": 30}}}, |
61 | 53 | ] |
62 | 54 | dataset.save() |
|
78 | 70 | Dataset.from_dict( |
79 | 71 | { |
80 | 72 | "name": "Dataset with functions and conditions", |
| 73 | + "product": PRODUCT, |
81 | 74 | "fields": ["timestamp_utc", "rp_entity_id", "entity_name", "AVG_REL"], |
82 | 75 | "filters": {}, |
83 | 76 | "custom_fields": [ |
|
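For context, a minimal sketch of pulling a saved dataset's data synchronously with the ravenpackapi client. RPApi, get_dataset, and json() are assumptions based on that SDK's README rather than anything shown in this diff, and the API key and dataset id are placeholders:

from ravenpackapi import RPApi

api = RPApi(api_key="YOUR_API_KEY")  # placeholder credentials (assumption)
ds = api.get_dataset(dataset_id="YOUR_DATASET_UUID")  # e.g. a uuid from the query builder
# json() runs a small synchronous analytics request; for longer date
# ranges the request_datafile/save_to_file flow shown above scales better
data = ds.json(start_date="2018-04-10", end_date="2018-04-11")
for record in data:
    print(record)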