Parsing and converting a table to .txt files

06-18-2014 07:43 AM
DavidBuehler
Occasional Contributor III
The setup: we currently do not have an automated work order system in place. IT has approached me to build an internet GIS-based service request application, and the local government model template "Citizen Service Request" does what we want it to do. On the back end I am scripting out how to update the features from unassigned to assigned after creating some .csv/.txt files to attach to emails sent via SQL Server Business Intelligence Development Studio. I have put together the Python code below as a proof of concept before switching out the necessary items for our SDE database. This will be a stand-alone script running on the server at a regular interval (if that matters? Suggestions welcome).

My questions are:
1) I get it to produce an intermediate table based on whether the STATUS field has the value 'Unassigned'. I would like to take it a step further and have it parse records out based on request type as well as the unassigned status. For example: potholes, street sign damage, and debris in road would be written to one table; sidewalk damage, weeds, and graffiti to another; and so on until I have an attachment for each group of people I need to send them to. Is there an elegant way of doing that, or do I need multiple table-to-table conversions and expressions? (I sketched my rough idea at the end of the script below, but I do not know if it is right.)

2) Right now it converts that one table to a text file that can be added as an attachment to the email. I would like to take it to the next level and convert all of the tables created in question 1. I assume I could just copy the code over and over with the appropriate tables to convert, but is there a better way to do that?

3) Building on question 2, if there are no records I would like the script not to clunk out because there is nothing in the table to convert. I am pretty sure I need an if/else or if/then statement, but I am not proficient with Python scripting. Any help would be appreciated.
#importing modules
import arcpy
import os
import csv

# set the workspace
arcpy.env.workspace = "F:/Home/GIS/Depts/GIS/proj/ServiceRequest/ServiceRequest.gdb"
arcpy.env.overwriteOutput = True

#setting variables
workspace = arcpy.env.workspace
fields = ["STATUS", "REQUESTID"]
expression = arcpy.AddFieldDelimiters(workspace, "STATUS") + " = 'Unassigned'"

print "Starting transport Captain"

#setting local variables and setting up Table to Table Conversion
inTable = "ServiceRequests"
outLocation = workspace
outTable = "Unassigned"

#Table to Table conversion

arcpy.TableToTable_conversion(inTable, outLocation, outTable, expression)

print "Table made it to destination Captain"

#updating records from unassigned to assigned
fcToUpdate = "F:/Home/GIS/Depts/GIS/proj/ServiceRequest/ServiceRequest.gdb/ServiceRequests"
affectedField = "STATUS"
oldValue =  'Unassigned'
newValue = 'Assigned'

queryString = '"' + affectedField + '" = ' + "'" + oldValue + "'"

with arcpy.da.UpdateCursor(fcToUpdate, (affectedField,), queryString) as cursor:
    for row in cursor:
        row[0] = newValue
        cursor.updateRow(row)


# Converting FileGeoDatabase Table to .TXT for emailing
print "Starting conversion GIS to CSV Captain"

table = outTable
outfile = "F:/Home/GIS/Depts/GIS/proj/ServiceRequest/Unassigned.txt"

#--first lets make a list of all of the fields in the table
fields = arcpy.ListFields(table)
field_names = [field.name for field in fields]

with open(outfile,'wb') as f:
    w = csv.writer(f)
    #--write all field names to the output file
    w.writerow(field_names)

    #--now we make the search cursor that will iterate through the rows of the table
    for row in arcpy.SearchCursor(table):
        field_vals = [row.getValue(field.name) for field in fields]
        w.writerow(field_vals)
    del row

print "Mission Success Captain. Returning to Base"
5 Replies
PaulSchneider
Occasional Contributor
1) You'd need something to denote which text file each record separates into.  This could be a separate field, or (if you dare) hard-coded in your Python... but I'd only recommend the latter if your separations are static.  Assuming you have a way to separate, I'd push each separator value and its output text file into a dict, and move the process of writing the data to a separate function.  Then, while processing the rows, you can specify which file each one goes to.  Something like:

import arcpy, os, time # os and time are only needed for the timestamped file names built in fileStore below

def log(txt, inFile, newRun):
    try:
        if newRun == True:
            csvFile = open(inFile, 'w')
        elif newRun == False:
            csvFile = open(inFile, 'a')
        csvFile.writelines(txt)
        csvFile.flush()
        csvFile.close()
    except IOError:
        print 'Unable to write to file ' + inFile

def main():
    inDS = r"C:\TEMP\JUNK\ServiceRequest.gdb\ServiceRequests"
    whereClause = r"STATUS = 'Unassigned'"
    statusField = "STATUS"
    deptField = u'DEPT_ASSIGNEE' ##determines how to separate the tables (see table separation note below)
    logDir = r"C:\TEMP\JUNK"
    updateTo = "Assigned"

    fieldArr = []
    for fld in arcpy.ListFields(inDS):
        fieldArr.append(fld.name)
    fieldArr.remove('SHAPE')

    ##Table Separation Note
    ##Create the list of output text files (must be based on a common value) - note it does not have to be a field in the data...
    ##you could create a dictionary to hold these values in code, but doing so would cause the code to go stale upon new, unforeseen values
    with arcpy.da.SearchCursor(inDS, fieldArr) as sc:
        fileStore = {}
        for scr in sc:
            for j in range(len(scr)):
                if j == fieldArr.index(deptField) and scr[j] not in fileStore.keys():
                    fileStore[scr[j]] = logDir + os.sep + time.strftime('%Y%m%d', time.localtime(time.time())) + str(scr[j]) + '.csv'

    headerLine = ""
    for i in range(len(fieldArr)):
        if i != len(fieldArr) - 1:
            headerLine += str(fieldArr) + ","
        else:
            headerLine += str(fieldArr) + "\n"

    for k,v in fileStore.iteritems():
        log(headerLine, v, True)

    fieldxi = fieldArr.index(statusField)
    sepxi = fieldArr.index(deptField)

    try:
        with arcpy.da.UpdateCursor(inDS, fieldArr, whereClause) as cursor:
            for row in cursor:
                rowLine = ""
                fileWrite = ""
                for i in range(len(row)):
                    if i == sepxi:
                        fileWrite = fileStore[row[i]]
                    if i != len(row) - 1:
                        rowLine += str(row[i]) + ","
                    else:
                        rowLine += str(row[i]) + "\n"
                row[fieldxi] = updateTo
                cursor.updateRow(row)
                log(rowLine, fileWrite, False)
    except Exception as e:
        print 'Error updating the dataset: ' + str(e)

if __name__ == '__main__':
    main()


2) The smtplib module should be able to assist you with sending emails with attachments.
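
A bare-bones sketch (the SMTP server name, addresses, and file path are placeholders, and this assumes your mail server allows unauthenticated relay):

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication

msg = MIMEMultipart()
msg['Subject'] = 'Unassigned service requests'
msg['From'] = 'gis@yourcity.gov'            # placeholder addresses
msg['To'] = 'streets@yourcity.gov'
msg.attach(MIMEText('The latest unassigned requests are attached.'))

# attach the CSV produced by the script
with open(r'C:\TEMP\JUNK\20140620Streets.csv', 'rb') as f:
    part = MIMEApplication(f.read(), _subtype='csv')
part.add_header('Content-Disposition', 'attachment', filename='Streets.csv')
msg.attach(part)

server = smtplib.SMTP('mail.yourcity.gov')  # placeholder SMTP server
server.sendmail(msg['From'], [msg['To']], msg.as_string())
server.quit()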

3) Using a try/except is considered good practice for error handling that you should add in anyhow; to expand on this, you could add an if statement with a break (or a simple guard) to exit a segment of code when there is nothing to process.
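
For your question 3 specifically, a small sketch of a guard that skips the conversion and email when the intermediate table is empty (the workspace path and table name are taken from your script):

import arcpy

arcpy.env.workspace = r"F:\Home\GIS\Depts\GIS\proj\ServiceRequest\ServiceRequest.gdb"
outTable = "Unassigned"  # the intermediate table your script creates

count = int(arcpy.GetCount_management(outTable).getOutput(0))
if count > 0:
    print 'Converting %d rows from %s' % (count, outTable)
    # ...run the CSV export and email steps here...
else:
    print 'No unassigned requests; nothing to convert'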

Good Luck
DavidBuehler
Occasional Contributor III

After meeting with the departments and compiling their feedback: what in the code above would need to be modified so that it runs against a single department and writes to an already created file? Basically, we do not want to spam our departments, nor overwrite all of the files at the same time, especially if there is no new request. We are thinking of using triggers on the database to run the script against a new entry and immediately notify that department. Any issues with that?

DavidBuehler
Occasional Contributor III
Thanks Paul.  This definitely helps and thanks for the links.  We'll see where this project goes from here. I am sure it will morph from the initial request.
DavidBuehler
Occasional Contributor III

I know it has been a while, but the project scope has changed. I modified the code below. It works great if the data is not versioned, is on the default version of the database, or is in a file geodatabase. Is there a limitation when working with a version of an SDE database? For instance, a webediting version of default.

# Original Code Created:     20/06/2014
# Original Copyright:   (c) pschneider 2014
# Modified Code Created:  December 08, 2014
# Modified Copyright: (c) DBuehler 2015
#-------------------------------------------------------------------------------
import arcpy # Libraries to import


# CSV writing function. Separate function that is called in the main function
def log(txt, inFile, newRun):
    try:
        if newRun == True:
            csvFile = open(inFile, 'w')
        elif newRun == False:
            csvFile = open(inFile, 'a')
        csvFile.writelines(txt)
        csvFile.flush()
        csvFile.close()
    except IOError:
        print 'Unable to write to file ' + inFile


#Main function that builds an array of field names, writes the headers, and then writes matching rows and updates them
def main():
    inDS = r"C:\TEMP\JUNK\Testing.gdb\Testing"
    whereClause = r"STATUS = 'New' AND (PUBLICSTREETS = 'Damaged/Missing Street Sign' OR PUBLICSTREETS = 'Icy Intersection/Road' OR PUBLICSTREETS = 'Pothole' OR PUBLICSTREETS = 'Public Vegetation Concern')"
    statusField = "STATUS"
    logDir = r"C:\TEMP\JUNK\StreetDeptStreetIssues.csv"
    updateTo = "Assigned"


    fieldArr = []
    for fld in arcpy.ListFields(inDS):
        fieldArr.append(fld.name)
    fieldArr.remove('SHAPE')
    fieldArr.remove('GlobalID')
    fieldArr.remove('REQUESTID')
    fieldArr.remove('SOCIALLOGIN')


    # Part where the headers get written to the output CSV
    headerLine = ""
    for i in range(len(fieldArr)):
        if i != len(fieldArr) - 1:
            headerLine += str(fieldArr[i]) + ","
        else:
            headerLine += str(fieldArr[i]) + "\n"

    log(headerLine, logDir, True)


    fieldxi = fieldArr.index(statusField)


    try:
        with arcpy.da.UpdateCursor(inDS, fieldArr, whereClause) as cursor:
            for row in cursor:
                rowLine = ""
                for i in range(len(row)):
                    if i != len(row) - 1:
                        rowLine += str(row[i]) + ","
                    else:
                        rowLine += str(row[i]) + "\n"
                row[fieldxi] = updateTo
                cursor.updateRow(row)
                log(rowLine, logDir, False)
    except Exception as e:
        print 'Error updating the dataset: ' + str(e)


if __name__ == '__main__':
    main()
PaulSchneider
Occasional Contributor

Scoping it inside an edit session will likely be your ticket here.

    ## CREATE VERSIONED EDIT SESSION
    edit = arcpy.da.Editor(gisConn)  # gisConn = the path to your .sde connection file
    edit.startEditing(True, True)    # see the Help; second True = multi-user mode, required for versioned data
    edit.startOperation()

    #Do work

    ## STOP EDITING
    edit.stopOperation()
    edit.stopEditing(True)  # True saves edits
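
Filling in the "#Do work" part with the status update from your script might look like this sketch (the connection file path and feature class name are placeholders):

import arcpy

gisConn = r"C:\TEMP\JUNK\webediting.sde"  # placeholder .sde connection file
inDS = gisConn + "\\ServiceRequests"      # placeholder versioned feature class
whereClause = "STATUS = 'New'"

edit = arcpy.da.Editor(gisConn)
edit.startEditing(True, True)  # multi-user mode; required for versioned data
edit.startOperation()

with arcpy.da.UpdateCursor(inDS, ["STATUS"], whereClause) as cursor:
    for row in cursor:
        row[0] = "Assigned"
        cursor.updateRow(row)

edit.stopOperation()
edit.stopEditing(True)  # True saves the edits to the version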