import requests
import json
import time
import pickle
import os

7"""
8
9"""
10
mempool_list = ['show/text/Mempool', 'pick/cm.mempool.time_since_last_block', 'pick/cm.fees.immediate', 'pick/cm.fees.hour', 'pick/cm.fees.day', 'pick/cm.fees.week', 'pick/cm.mempool.transactions', 'pick/cm.mempool.vsize', 'pick/cm.mempool.blocks_to_clear', 'pick/cm.mempool.pending_fees']

mining_list = ['show/text/Mining', 'pick/cm.mining.hash_rate_2016_blocks', 'pick/cm.mining.difficulty', 'pick/cm.mining.difficulty_epoch', 'pick/cm.mining.last_difficulty_change', 'pick/cm.mining.block_time_2016_blocks', 'pick/cm.retarget.blocks_to_retarget', 'pick/cm.retarget.retarget_date', 'pick/cm.retarget.estimated_difficulty_change', 'pick/cm.retarget.block_time_diff_epoch', 'pick/cm.mining_economics.block_subsidy', 'pick/cm.mining_economics.avg_fees_per_block', 'pick/cm.mining_economics.daily_phash_revenue']

lightning_list = ['show/text/LN', 'pick/cm.lightning.total_capacity', 'pick/cm.lightning.capacity_value', 'pick/cm.lightning.total_nodes', 'pick/cm.lightning.total_channels', 'pick/cm.lightning.tor_capacity', 'pick/cm.lightning.percentage_tor_capacity', 'pick/cm.lightning.tor_nodes']

liquid_list = ['show/text/Liquid', 'pick/cm.liquid.peg_in_capacity', 'pick/cm.liquid.peg_in_capacity_value', 'pick/cm.liquid.liquid_block_height', 'pick/cm.liquid.liquid_chain_size']

network_list = ['show/text/Network', 'pick/cm.blockchain.block_height', 'pick/cm.blockchain.money_supply', 'pick/cm.blockchain.percentage_issued', 'pick/cm.blockchain.utxo_set_size', 'pick/cm.bitnodes.reachable_bitcoin_nodes', 'pick/cm.bitnodes.bitcoin_tor_nodes']

gold_list = ['show/text/Goooold', 'pick/cm.commodity_markets.bitcoin_priced_in_gold', 'pick/cm.commodity_markets.bitcoin_vs_gold_market_cap']

wall_st_list = ['show/text/WallSt', 'pick/cm.treasuries.held_in_corp_treasuries', 'pick/cm.treasuries.value_in_corp_treasuries']

market_list = ['show/text/Market', 'pick/cm.markets.price', 'pick/cm.markets.sats_per_dollar', 'pick/cm.markets.market_capitalization']

whirlpool_list = ['show/text/SStats?pair=POOL/SIDE', 'pick/cm.whirlpool.unspent_capacity', 'pick/cm.whirlpool.unspent_value', 'pick/cm.whirlpool.unspent_count', 'pick/cm.whirlpool.tx0_volume_30_days', 'pick/cm.whirlpool.spent_cycle_output_30_days', 'pick/cm.whirlpool.cycles_30_days']

digest_list = {'mempool':mempool_list, 'mining':mining_list, 'lightning':lightning_list, 'liquid':liquid_list, 'network':network_list, 'gold':gold_list, 'wall_st':wall_st_list, 'market':market_list, 'whirlpool':whirlpool_list}

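# A minimal sketch (not part of the original design) of the "move the hardcoded lists to a data
# file" item from THINGS TO DO at the bottom: load the digest categories from a JSON file and
# fall back to the hardcoded dict above if the file is missing. The "digests.json" filename is
# an assumption.
def load_digest_list(path="digests.json"):
    if os.path.isfile(path):
        with open(path) as f:
            return json.load(f)
    return digest_list
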
clock_end_point = '192.168.1.89' # make a function to set this with input and persist

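# A minimal sketch (not part of the original design) of the setter the comment above asks for:
# prompt for the clock's IP once, persist it, and reuse it on later runs. The "endpoint.pkl"
# filename is an assumption; clock_end_point = load_clock_end_point() would replace the
# hardcoded IP above.
def load_clock_end_point():
    if os.path.isfile("endpoint.pkl"):
        return pickle.load(open("endpoint.pkl", "rb"))
    end_point = input("Enter your Blockclock's IP address: ")
    pickle.dump(end_point, open("endpoint.pkl", "wb"))
    return end_point
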
'''
block_query fetches the current block height from Blockstream.info's API and, if it has changed,
has the clock display the new height. The time.sleep call makes sure that if the calling
function's next action on return is another clock update, it doesn't error out and fail against
the hardware's 1 minute refresh limit. The block height is returned to ultimately persist on
shutdown.
'''
def block_query(cached_block):
    current_block = requests.get("https://blockstream.info/api/blocks/tip/height").text
    if int(current_block) > int(cached_block):
        # The clock re-queries its own electrum source here, which can lag behind
        # Blockstream's tip (see BUG NOTES at the bottom of the file).
        requests.get("http://"+clock_end_point+"/api/pick/electrum.BTC.block_height")
        time.sleep(60)
        return current_block
    else:
        return cached_block

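# A minimal sketch (not part of the original design) of the "push the blockstream.info height
# directly" item from THINGS TO DO at the bottom: display the fetched height itself instead of
# asking the clock to re-query electrum. show/text is assumed to accept a numeric string, since
# it is the only display endpoint used elsewhere in this script.
def push_block_height(height):
    requests.get("http://"+clock_end_point+"/api/show/text/"+str(height))
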
'''
display_reset resets the default clock update window to 5 minutes and pushes to the next item in
the clock's onboard display queue at the end of a digest_controller call. Might be generalized in
future if slower refresh windows are widely used.
'''
def display_reset():
    requests.get("http://"+clock_end_point+"/api/action/update?rate=5")
    requests.get("http://"+clock_end_point+"/api/action/next")
    return

'''
digest_controller pushes each data point in a category to the clock display, one per one-minute
update window, until the list is finished. At every update window it calls block_query before
pushing the next data point so real-time block height updates are maintained. Data categories are
hard coded for now, but support for users constructing their own will be built out in future.
'''
def digest_controller(digest_list, cached_block):
    requests.get("http://"+clock_end_point+"/api/action/pause")
    for i in digest_list:
        cached_block = block_query(cached_block)
        requests.get("http://"+clock_end_point+"/api/"+str(i))
        time.sleep(60)
    display_reset()
    return cached_block

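# Rough timing note: each item in a digest occupies one one-minute window (the mempool list, for
# example, has 10 entries, so roughly 10 minutes), plus an extra minute for every new block that
# block_query catches while the digest is running.
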
'''
time_query takes the schedule list from clock_controller and checks whether a scheduled
information digest should be pushed to the clock's display. The alarm_catch is a one minute
offset into the future in case block_query pushes a block height to the display at the scheduled
time of an information digest.
'''
def time_query(schedule, cached_block):
    for schedule_cache in schedule:
        alarm = schedule_cache[0]
        alarm_catch = schedule_cache[1]
        if alarm == time.localtime()[3:5]:
            cached_block = digest_controller(schedule_cache[2], cached_block)
        elif alarm_catch == time.localtime()[3:5]:
            cached_block = digest_controller(schedule_cache[2], cached_block)
    return cached_block

'''
clock_controller is the core function, making sure the appropriate actions are taken at each
refresh window. Every 60 seconds it checks whether the external shutdown script was run
(returning cached_block to be persisted if it was); if not, it calls block_query and then
time_query to see if there is a new block height or information digest to display.
'''
def clock_controller(schedule, cached_block):
    while True:
        if pickle.load(open("shutdown.pkl", "rb")):
            return cached_block
        else:
            cached_block = block_query(cached_block)
            cached_block = time_query(schedule, cached_block)
            time.sleep(60)

'''
schedule_creator prompts the user for which data categories they would like displayed on the
clock and at what times to push the data items. It returns the schedule for persisted storage.
The alarm values are stored as tuples so they can be compared against time.localtime()[3:5] in
time_query.
'''
def schedule_creator(digest_list):
    schedule = []
    print('''
    |Welcome to the Block Clock Scheduler|

    This is an automated program to allow
    more flexible control over what your
    Blockclock Mini displays. By default it
    checks every minute and, if a new block
    is found, interrupts whatever is on the
    display with the current block height,
    then returns to the default display
    queue. It can also be set to interrupt
    the default queue at scheduled times to
    display informational digests of
    different categories of info from Clark
    Moody's dashboard at one minute intervals.
    Have fun.\n''')
    input("Press [Enter] to continue.")
    print("\nHere are the default information categories:")
    for key, value in digest_list.items():
        print(key)
    input("\nPress [Enter] to continue.")
    print('''\nHow many info digests would you like to schedule?
    (Keep in mind that if you schedule one digest so soon after
    another that the first cannot complete, the second will not
    be displayed. Check the readme for how long each digest
    takes, and remember block updates will interrupt and add
    another minute to the completion time.)\n''')
    digest_num = int(input("Number of digests: "))
    for i in range(digest_num):
        print("\nPick a category of information:\n")
        for key, value in digest_list.items():
            print(key)
        category = digest_list[input("\nCategory: ")]
        print("\nEnter times in 24 hour format, e.g. 4 PM = 16.")
        alarm_hour = int(input("\nGive me an hour: "))
        alarm_minute = int(input("Give me a minute: "))
        alarm_time = (alarm_hour, alarm_minute)
        # Roll the catch alarm over the hour (and day) boundary, e.g. 16:59 -> 17:00.
        alarm_time_catch = ((alarm_hour + (alarm_minute + 1) // 60) % 24, (alarm_minute + 1) % 60)
        schedule_entry = [alarm_time, alarm_time_catch, category]
        schedule.append(schedule_entry)
    return schedule
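
# For reference, a schedule built above with illustrative values looks like
#     [[(16, 30), (16, 31), mining_list], [(9, 0), (9, 1), market_list]]
# where the second tuple in each entry is the one-minute catch alarm checked by time_query.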

'''
schedule_controller checks for a persisted schedule file; if none is found, it calls
schedule_creator for the user to create one and then writes it to disk.
'''
def schedule_controller():
    if os.path.isfile("schedule.pkl"):
        print("File already exists")
        schedule = pickle.load(open("schedule.pkl", "rb"))
        return schedule
    else:
        schedule = schedule_creator(digest_list)
        pickle.dump(schedule, open("schedule.pkl", "wb"))
        return schedule

'''
main handles loading cached_block (if present) and calling schedule_controller to load an
existing schedule or prompt the user to create one if it doesn't exist. It then passes the
schedule and cached_block to clock_controller and waits for the returned cached_block to persist
on shutdown.
'''
def main():
    pickle.dump(False, open("shutdown.pkl", "wb"))
    if os.path.isfile("height.pkl"):
        cached_block = pickle.load(open("height.pkl", "rb"))
    else:
        cached_block = 0
    schedule = schedule_controller()
    print("The program will keep running until you run 'stopclockd.py' in this directory")
    cached_block = clock_controller(schedule, cached_block)
    pickle.dump(cached_block, open("height.pkl", "wb"))

if __name__ == "__main__":
    main()
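
# For reference, the companion 'stopclockd.py' mentioned above only needs to flip the shutdown
# flag that clock_controller polls each minute. A minimal sketch, assuming it is run from the
# same directory:
#
#     import pickle
#     pickle.dump(True, open("shutdown.pkl", "wb"))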

'''

EDGE CASES:
- scheduling something while a default refresh is in progress and erroring out
- a block coming in twice in a row at one minute intervals

BUG NOTES
- triggered a block height display after a new block came in, but it displayed the previous block height

THINGS TO DO
- get off your lazy ass and push a raw translation of the blockstream.info block height instead of calling electrum
- refactor the hardcoded digest_lists into a global list with a default creation of sub-buckets? At least add the ability to make new ones (likely requires moving the hardcoded lists to a data file)