# gmba_django/app/convert.py

import csv
import json
import re
from os.path import isfile
from app.models import *
from app.formats import *
from django.db import transaction
# Correct commas inside of a linked field
def fix_bracketed_lists(data):
    """Return *data* with commas inside parenthesised fragments turned
    into ' /' so they are not mistaken for list separators later."""
    bracketed = re.findall(r'\([^\s]*,[ ]*[^\s]*\)', data)
    for fragment in bracketed:
        cleaned = fragment.replace(',', ' /')
        data = data.replace(fragment, cleaned)
    return data
# Check for valid link
def fix_url(link):
    """Prefix *link* with 'http://' when it looks like a bare host
    (longer than 3 chars and not already starting with 'http')."""
    needs_scheme = len(link) > 3 and not link.startswith('http')
    return 'http://' + link if needs_scheme else link
# Create linked objects
def add_linked(person, obj, data):
    """Split the free-text field *data* into item names and return the
    matching *obj* model instances, creating (and saving) any that do
    not exist yet. Names shorter than 3 characters are skipped.

    NOTE: *person* is currently unused; linking to the person happens
    at the call site.
    """
    # TODO: fuzzy matching instead of lower()
    normalised = fix_bracketed_lists(data).lower().replace(';', ',')

    linked = []
    for raw in normalised.split(','):
        name = raw.strip()
        if len(name) < 3:
            continue
        target = obj.objects.filter(name=name).first()
        if not target:
            target = obj()
            target.name = name
            target.save()
        linked.append(target)
    return linked
# Fetch an object by source_id (numeric identifier used in source DB)
def get_by_id(rowid, obj, first=True):
    """Look up *obj* rows whose source_id equals *rowid*.

    rowid -- int, or a string of digits (surrounding whitespace allowed);
             anything else yields (None, None)
    obj   -- Django model class with a `source_id` field
    first -- when True return the first match, otherwise the queryset

    Returns (match_or_queryset, int_rowid) or (None, None).
    """
    if isinstance(rowid, str) and rowid.strip().isdigit():
        rowid = int(rowid)
    # bool is an int subclass; exclude it so True/False never hits the DB
    if isinstance(rowid, int) and not isinstance(rowid, bool):
        qs = obj.objects.filter(source_id=rowid)
        return (qs.first() if first else qs), rowid
    return None, None
# Quick check of the number of lines
def get_total_rows_csv(filename):
    """Return the number of lines in *filename* (0 for an empty file).

    Fixes the original UnboundLocalError: `for i, l in enumerate(f)`
    never bound `i` when the file was empty, so `i + 1` crashed.
    """
    with open(filename) as f:
        return sum(1 for _ in f)
# Search index routine
def reindex_data():
    """Rebuild the search index for every Person and persist it."""
    # The original enumerated the queryset but never used the index;
    # iterate directly.
    for person in Person.objects.all():
        person.index()
        person.save()
# Data update routine
def refresh_data(filename, fmt=None):
    """Import or update database content from a source data file.

    Generator protocol: while working it yields progress tuples
    (rowcount, fraction_done); on failure it yields (message, "error")
    and stops; on normal completion it yields a final (None, None)
    sentinel and returns the number of records processed.

    filename -- path to the CSV or GeoJSON file to load
    fmt      -- format descriptor dict (see app.formats); this code reads
                the keys 'filename', 'extension', 'required', 'dataformat'
    """
    count = 0
    rowcount = 0
    if not isfile(filename):
        # NOTE(review): this reports fmt['filename'] rather than the
        # `filename` that was actually checked, and raises TypeError when
        # fmt is None (its default) — confirm intended behavior.
        msg = "Missing data: %s - refresh aborted." % fmt['filename']
        print(msg)
        yield msg, "error"
        return None
    if fmt['extension'] == 'csv':
        totalrows = get_total_rows_csv(filename)
        with open(filename, 'rt', encoding='utf-8', errors='ignore') as csvfile:
            datareader = csv.DictReader(csvfile)
            for row in datareader:
                rowcount += 1
                if row is None:
                    continue
                # Report progress to the caller before processing the row
                yield rowcount, rowcount / totalrows

                # # Ensure any new data is flushed from time to time
                # if count % 25 == 0:
                #     db.session.commit()

                # Abort the whole import when a required column is absent
                for r in fmt['required']:
                    if not r in row:
                        msg = "Missing attribute in %s (%s)" % (r, fmt['filename'])
                        print(msg)
                        yield msg, "error"
                        return None
                if fmt['dataformat'] is DataFormat.PERSON_DETAIL:
                    # Match by source_id first, then by exact name; create new otherwise
                    person, source_id = get_by_id(row['ID'], Person)
                    if not person:
                        person = Person.objects.filter(first_name=row['First name'], last_name=row['Last name']).first()
                    if not person:
                        person = Person(first_name=row['First name'], last_name=row['Last name'], source_id=row['ID'])
                    # Update data fields
                    person.source_id = source_id
                    person.title = row['Title']
                    person.organisation = row['Organisation English']
                    print("Country = %s" % row['country'])
                    # Blank country falls back to id 0
                    # NOTE(review): assumes a Country row with id=0 exists — verify
                    if row['country'] is None or row['country'].strip() == '':
                        row['country'] = 0
                    c = Country.objects.get(id=row['country'])
                    person.country = c
                    person.position = row['Position']
                    person.biography = row['Biography']
                    person.contact_email = row['e-mail 1']
                    person.personal_url = fix_url(row['URL'])
                    # Create/attach all linked vocabulary rows in one transaction
                    with transaction.atomic():
                        research_methods = add_linked(person, Method, row['Methods'])
                        methods_people = [MethodsPeople.objects.get_or_create(method_id=m.id, person_id=person.id) for m in research_methods]
                        research_scales = add_linked(person, Scale, row['Scale'])
                        scales_people = [ScalesPeople.objects.get_or_create(scale_id=s.id, person_id=person.id) for s in research_scales]
                        research_taxa = add_linked(person, Taxon, row['Taxa'])
                        taxa_people = [TaxaPeople.objects.get_or_create(taxon_id=t.id, person_id=person.id) for t in research_taxa]
                        research_fields = add_linked(person, Field, row['Field of expertise'])
                        fields_people = [FieldsPeople.objects.get_or_create(field_id=f.id, person_id=person.id) for f in research_fields]
                    person.index()
                    person.save()
                    count = count + 1
                elif fmt['dataformat'] is DataFormat.RESOURCE_DETAIL:
                    res, source_id = get_by_id(row['ID'], Resource)
                    if not res: res = Resource(source_id=source_id)
                    res.title = row['Title']
                    res.citation = row['Citation']
                    res.url = fix_url(row['URL'].strip('#'))  # remove weird #formatting#
                    res.abstract = row['Abstract']
                    res.save()
                    count = count + 1
                elif fmt['dataformat'] is DataFormat.RANGE_DETAIL:
                    rng, source_id = get_by_id(row['Range_ID'], MountainRange)
                    if not rng: rng = MountainRange(source_id=source_id)
                    rng.gmba_id = row['GMBA_ID']
                    rng.name = row['RangeName']
                    rng.countries = row['Countries']
                    rng.save()
                    count = count + 1
                elif fmt['dataformat'] is DataFormat.PERSON_RESOURCE:
                    # Link rows: both sides must already exist or the row is skipped
                    rzs, source_id = get_by_id(row['Resource'], Resource, first=False)
                    if not rzs or not rzs.first(): continue
                    ppl, source_id = get_by_id(row['Person'], Person, first=False)
                    if not ppl or not ppl.first(): continue
                    for person in ppl:
                        # Replaces (does not extend) the person's resource links
                        person.resources = []
                        for r in rzs:
                            person.resources.append(r)
                        person.save()
                    count = count + 1
                elif fmt['dataformat'] is DataFormat.PERSON_RANGE:
                    rzs, source_id = get_by_id(row['MountainRange'], MountainRange, first=False)
                    if not rzs or not rzs.first():
                        continue
                    ppl, source_id = get_by_id(row['Person'], Person, first=False)
                    if not ppl or not ppl.first(): continue
                    for person in ppl:
                        # Replaces (does not extend) the person's range links
                        person.ranges = []
                        for r in rzs:
                            person.ranges.append(r)
                        person.save()
                    count = count + 1
    elif fmt['extension'] == 'geojson':
        ranges_missing = []
        with open(filename, 'rt', encoding='utf-8', errors='ignore') as jsonfile:
            jsondata = json.load(jsonfile)
            if fmt['dataformat'] is DataFormat.RANGE_SHAPES:
                totalrows = len(jsondata['features'])
                for f in jsondata['features']:
                    yield count, count / totalrows
                    count = count + 1
                    p = f['properties']
                    # Only updates ranges already present; missing ones are reported below
                    rge = MountainRange.objects.filter(gmba_id=p['GMBA_ID']).first()
                    if not rge:
                        ranges_missing.append(p['GMBA_ID'])
                        continue
                    rge.name = p['Name']
                    # Last present column wins ('Country_2_' overrides 'Country_1')
                    for c in ['Country_1', 'Country_2_']:
                        if c in p: rge.countries = p[c]
                    rge.save()
                print("Warning: %d ranges not found" % len(ranges_missing))
                print("[%s]" % ', '.join(ranges_missing))
    # Completion sentinel for consumers iterating the generator
    yield None, None
    return count