-rw-r--r--  dev/georgia/convert.py  19
-rw-r--r--  dev/nyc/convert.py      11
-rw-r--r--  dev/utah/convert.py     27
3 files changed, 26 insertions, 31 deletions
diff --git a/dev/georgia/convert.py b/dev/georgia/convert.py
index 10633e1..f734030 100644
--- a/dev/georgia/convert.py
+++ b/dev/georgia/convert.py
@@ -1,17 +1,16 @@
 import json
 from pprint import pprint
-from collections import defaultdict
 
 with open('cctv.json', 'r') as f:
     raw_data = json.load(f)
 
 cameras = raw_data['features']
 
-sources = defaultdict(list)
+sources = []
 for camera in cameras:
-    subdiv = camera['properties']['subdivision']
-    result = dict()
-    result['id'] = camera['properties']['cctv_id']
+    coord = [float(x) for x in camera['geometry']['coordinates']]
+    cam = dict()
+    cam['id'] = camera['properties']['cctv_id']
     if 'HLS' in camera['properties']:
         url = camera['properties']['HLS']
         url = url.replace('http://vss1live.dot.ga.gov:80/lo', '/georgiavss1')
@@ -19,14 +18,14 @@ for camera in cameras:
         url = url.replace('http://vss3live.dot.ga.gov:80/lo', '/georgiavss3')
         url = url.replace('http://vss4live.dot.ga.gov:80/lo', '/georgiavss4')
         url = url.replace('http://vss5live.dot.ga.gov:80/lo', '/georgiavss5')
-        result['stream'] = url
+        cam['stream'] = url
     elif camera['properties']['url'] is not None:
         url = camera['properties']['url']
         url = url.replace('http://navigator-c2c.dot.ga.gov/snapshots', '/georgiasnapshots')
-        result['url'] = url
+        cam['url'] = url
     else:
        continue
-    result['name'] = camera['properties']['location_description']
-    sources[subdiv].append(result)
+    cam['name'] = camera['properties']['location_description']
+    sources.append({'coord': coord, 'cams': [cam]})
 with open('sources.json', 'w') as f:
-    json.dump(dict(sources), f)
+    json.dump(sources, f)
diff --git a/dev/nyc/convert.py b/dev/nyc/convert.py
index 4b00bfa..2f21861 100644
--- a/dev/nyc/convert.py
+++ b/dev/nyc/convert.py
@@ -9,16 +9,17 @@ data = raw_data['markers']
 
 sources = []
 for camera in data:
-    result = dict()
-    result['id'] = camera['id']
-    result['name'] = camera['content']
+    coord = [float(camera['latitude']), float(camera['longitude'])]
+    cam = dict()
+    cam['id'] = camera['id']
+    cam['name'] = camera['content']
     fetched = requests.get('https://nyctmc.org/google_popup.php', params={'cid': camera['id']})
     match = re.search('http://207.251.86.238/cctv\\d+.jpg', fetched.text)
     if match is None:
         pprint(camera)
         continue
     url = match.group()
-    result['url'] = url
-    sources.append(result)
+    cam['url'] = url
+    sources.append({'coord': coord, 'cams': [cam]})
 with open('sources.json', 'w') as f:
     json.dump({'NYC': sources}, f)
diff --git a/dev/utah/convert.py b/dev/utah/convert.py
index cbad592..b02fc5c 100644
--- a/dev/utah/convert.py
+++ b/dev/utah/convert.py
@@ -3,27 +3,22 @@ from collections import defaultdict
 import xml.etree.ElementTree as ET
 from pprint import pprint
 
-map_areas = [[29, "Statewide"], [44, "I-15 / I-70 Junction"], [30, "Farmington"], [8, "Ogden"], [9, "Park City"],
-             [14, "Provo / Orem"], [43, "Richfield"], [7, "Salt Lake City"], [31, "Sandy"], [27, "St. George"],
-             [41, "US-6"], [45, "I-15 Technology Corridor Project"]]
-map_areas = dict(map_areas)
-
-names = dict()
+info = dict()
 for place in ET.parse('UtahKML.xml').findall('.//{*}Placemark'):
     cam_id = int(place.attrib['id'])
     cam_name = place.find(".//{*}SimpleData[@name='DisplayName']").text
-    names[cam_id] = cam_name
+    coords = [float(x) for x in place.find(".//{*}coordinates").text.split(',')]
+    info[cam_id] = {'name': cam_name, 'coords': coords}
 
-sources = defaultdict(list)
+sources = []
 with open('cameras.json', 'r') as f:
     places = json.load(f)
 for place in places:
-    area = place['mapAreaId']
-    if area in map_areas:
-        area = map_areas[area]
-    cam_id = place['entityId']
-    url = place['url']
-    name = names[cam_id]
-    sources[area].append({'id': cam_id, 'url': url, 'name': name})
+    cam_id = place['entityId']
+    url = place['url']
+    this_info = info[cam_id]
+    name = this_info['name']
+    coords = this_info['coords']
+    sources.append({'coords': coords, 'cams': [{'id': cam_id, 'url': url, 'name': name}]})
 with open('sources.json', 'w') as f:
-    json.dump(dict(sources), f)
+    json.dump(sources, f)