Darren, a couple of questions: I would like to do the same for another field, but there is not always a guaranteed 1-1 relationship between items and comments. Is there a way to loop over the rows instead of using the index? For instance, when I try to insert a comments field and concatenate all comments for the associated request, my output becomes truncated, giving me only one ItemCount and ItemType.
"ListOfLa311ServiceRequestNotes": {
"La311ServiceRequestNotes": [
{
"Comment": "Out on the sidewalk near the curb. Hopefully it is still there.",
"CommentType": "Address Comments",
"CreatedByUser": "MYLATHREEONEONE",
"CreatedDate": "02/17/2015 16:53:26",
"Date1": "",
"Date2": "",
"Date3": "",
"FeedbackSRType": "",
"IntegrationId": "021720151654176661",
"IsSrNoAvailable": "",
"ListOfLa311SrNotesAuditTrail": {},
"Notification": "N",
"Text1": ""
},
{
"Comment": "So glad to get rid of this old junk. Thanks.",
"CommentType": "External",
"CreatedByUser": "MYLATHREEONEONE",
"CreatedDate": "02/17/2015 16:53:26",
"Date1": "",
"Date2": "",
"Date3": "",
"FeedbackSRType": "",
"IntegrationId": "021720151654176662",
"IsSrNoAvailable": "",
"ListOfLa311SrNotesAuditTrail": {},
"Notification": "N",
"Text1": ""
}
]
},
import json
import jsonpickle
import requests
import arcpy
# --- Setup: clear stale outputs, then POST the request payload and decode the reply ---
# Raw strings keep Windows paths safe from backslash-escape interpretation;
# the resulting values are identical to the original literals.
fc = r"C:\MYLATesting.gdb\MYLA311"
if arcpy.Exists(fc):
    arcpy.Delete_management(fc)
ListTable = r"C:\MYLATesting.gdb\MYLA311Dissolve"
if arcpy.Exists(ListTable):
    arcpy.Delete_management(ListTable)
# Round-trip through jsonpickle to normalize the payload before posting.
# `with` guarantees the file handle is closed (the original leaked it).
with open(r'C:\Users\Administrator\Desktop\DetailView.json', 'r') as f2:
    data2 = jsonpickle.encode(jsonpickle.decode(f2.read()))
url2 = "myURL"  # NOTE(review): placeholder endpoint — confirm real URL
headers2 = {'Content-type': 'text/plain', 'Accept': '/'}
r2 = requests.post(url2, data=data2, headers=headers2)
decoded2 = json.loads(r2.text)
# Flatten each service request into one row PER electronic-waste item.
#
# Fix for the truncation problem: the original assigned CommodityType /
# ItemType / ItemCount inside one loop, then appended rows inside a second,
# sequential loop over the notes — so every appended row carried only the
# LAST waste item's values. Because there is no guaranteed 1-1 relationship
# between items and comments, we instead concatenate all comments for the
# request once, and each waste item keeps its own type/count while sharing
# that request-level comment string.
items = []
for sr in decoded2['Response']['ListOfServiceRequest']['ServiceRequest']:
    SRAddress = sr['SRAddress']
    latitude = sr['Latitude']
    longitude = sr['Longitude']
    SRNumber = sr['SRNumber']
    FirstName = sr['FirstName']
    LastName = sr['LastName']
    HomePhone = sr['HomePhone']
    # Join every note's text for this request into one field value.
    notes = sr["ListOfLa311ServiceRequestNotes"][u"La311ServiceRequestNotes"]
    Comment = ', '.join(n['Comment'] for n in notes)
    for ew in sr["ListOfLa311ElectronicWaste"][u"La311ElectronicWaste"]:
        items.append((SRAddress,
                      latitude,
                      longitude,
                      ew['Type'],
                      ew['ElectronicWestType'],  # field name as delivered by the service
                      SRNumber,
                      ew['ItemCount'],
                      FirstName,
                      LastName,
                      Comment,
                      HomePhone))
# Load the flattened rows into a NumPy structured array and write them out
# as a point feature class in WGS84 (longitude/latitude).
import numpy as np  # NOTE THIS: imported here, next to its only use

# One (name, dtype) spec per tuple position in `items`.
# NOTE(review): latitude/longitude are declared float ('<f8') — this assumes
# the JSON values parse cleanly as floats; confirm against the service output.
field_specs = [
    ('SRAddress', 'U40'),
    ('latitude', '<f8'),
    ('longitude', '<f8'),
    ('Type', 'U40'),
    ('ElectronicWestType', 'U40'),
    ('SRNumber', 'U40'),
    ('ItemCount', 'U40'),
    ('FirstName', 'U40'),
    ('LastName', 'U40'),
    ('Comment', 'U40'),
    ('HomePhone', 'U40'),
]
dt = np.dtype(field_specs)
arr = np.array(items, dtype=dt)

# EPSG 4326 = WGS 1984 geographic coordinate system.
sr = arcpy.SpatialReference(4326)
arcpy.da.NumPyArrayToFeatureClass(arr, fc, ['longitude', 'latitude'], sr)
# Build a per-(SRAddress, SRNumber) summary of "type, count" pairs and write
# it into a new SRList text field on the feature class.
#
# Bug fix: the 4th positional argument of AddField_management is
# field_precision (meaningless for TEXT), not the field length — the intended
# 255-character length must be passed as field_length.
arcpy.AddField_management(fc, "SRList", "TEXT", field_length=255)
# NOTE(review): "Comments" is created but never populated below — confirm intent.
arcpy.AddField_management(fc, "Comments", "TEXT", field_length=255)

valueList = []  # one key per feature: SRAddress concatenated with SRNumber
with arcpy.da.SearchCursor(fc, ["SRAddress", "SRNumber"]) as cursor:
    for row in cursor:
        valueList.append(row[0] + str(row[1]))
# Unique keys -> dict mapping each key to None (filled in below).
valueDict = dict.fromkeys(set(valueList))

with arcpy.da.SearchCursor(fc, ["SRAddress", "SRNumber", "ElectronicWestType", "ItemCount", "Comment"]) as cursor:
    for row in cursor:
        key = row[0] + str(row[1])
        pair = str(row[2]) + ', ' + str(row[3])
        # Accumulate a comma-separated "type, count" list per key.
        # New pairs are prepended, so the list is in reverse cursor order
        # (matches the original behavior).
        if not valueDict[key]:
            valueDict[key] = pair
        else:
            valueDict[key] = pair + ', ' + str(valueDict[key])

with arcpy.da.UpdateCursor(fc, ["SRAddress", "SRNumber", "SRList"]) as cursor:
    for row in cursor:
        # Write the accumulated list onto every row sharing the same key.
        row[2] = valueDict[row[0] + str(row[1])]
        cursor.updateRow(row)
arcpy.Dissolve_management(fc, "C:\MYLATesting.gdb\MYLA311Dissolve", ["SRNumber", "SRAddress", "Type", "SRList", "FirstName", "LastName", "HomePhone", "Comment"])
print json.dumps(decoded2, sort_keys=True, indent=4)