|
#!/usr/bin/env ruby
|
require "csv" |
|
######################################################################################### |
|
# Translate Pivotal Tracker's CSV export into Kanbanery's CSV Import |
|
# --------------------------------------------------------------------------------------- |
|
# Pivotal's CSV fields: |
|
# |
|
# Id |
|
# Story |
|
# Labels |
|
# Iteration |
|
# Iteration Start |
|
# Iteration End |
|
# Story Type |
|
# Estimate |
|
# Current State |
|
# Created at |
|
# Accepted at |
|
# Deadline |
|
# Requested By |
|
# Owned By |
|
# Description |
|
# URL |
|
# Comment |
|
# ... |
|
# Task |
|
# Task Status |
|
# ... |
|
# |
|
# --------------------------------------------------------------------------------------- |
|
# Kanbanery CSV fields: |
|
# |
|
# title |
|
# type |
|
# estimate |
|
# priority |
|
# description |
|
# column_name |
|
# creator_email |
|
# |
|
######################################################################################### |
|
|
|
# configure the input (Pivotal) and output (Kanbanery) csv files here |
|
|
|
# input (Pivotal) CSV to read
input_filename = "pivotal.csv"

# output (Kanbanery) CSV to write
output_filename = "kanbanery.csv"

# configure the email address for the task creator
# NOTE(review): class variables (@@) at top level attach to Object and emit
# "class variable access from toplevel" warnings on Ruby 1.9+. They are used
# here so the Story class below can read them; consider constants instead.
@@creator_email = 'email@mail.com'

# bug column name (map all bugs into this column)
@@bug_column = "Legacy Bugs"

# load into icebox? (overrides the bug column and all other column names)
@@icebox = false

#########################################################################################

# accumulates Story objects parsed from the Pivotal export
stories = []

# separator inserted between sections of the generated description
@@separator = "\n\n---------------------------------\n\n"
|
|
|
# Represents a single Pivotal Tracker story and knows how to render itself
# as one row of Kanbanery's CSV import format.
#
# Relies on the top-level class variables @@separator, @@icebox,
# @@bug_column and @@creator_email being assigned before use.
class Story

  attr_accessor :title, :labels, :type, :description, :tasks, :comments, :url, :created_at, :id

  # Title plus the labels in parentheses, e.g. "Fix login (auth)".
  def full_title
    self.title + (self.labels == "" ? "" : " (#{ self.labels })")
  end

  # Builds the long-form description: the full title (only when it is too
  # long to survive the 255-char truncation in to_kanbanery), the original
  # description, all comments, all tasks, then a provenance footer with the
  # original URL, creation date and Pivotal id.
  def full_description
    output = ""

    # Kanbanery titles are capped at 255 chars (see to_kanbanery), so an
    # over-long title is preserved inside the description instead.
    output << self.full_title + @@separator if self.full_title.size > 255

    output << self.description + @@separator

    output << @@separator unless self.comments.size.zero?
    # was: self.comments.each{ |k,v| v }.join(...) — the each was a no-op
    # (each returns its receiver), so join the array directly
    output << self.comments.join(@@separator)

    output << @@separator unless self.tasks.size.zero?
    output << self.tasks.join(@@separator)

    output << "Original URL: " + self.url
    output << "\nOriginal Creation Date: " + self.created_at
    output << "\nPivotal Tracker ID: " + self.id

    output
  end

  # Decides which Kanbanery column the story lands in. @@icebox wins over
  # everything; then the "refactoring" label; then bugs; everything else
  # goes to "Imported".
  def destination_column
    return "Icebox" if @@icebox
    return "Refactoring" if self.labels.include? "refactoring"
    return @@bug_column if self.type == "bug"
    "Imported"
  end

  # All imported stories share the same priority.
  def priority
    0
  end

  # Returns the row in Kanbanery's import column order:
  # title, type, estimate, priority, description, column_name, creator_email
  # (estimate is intentionally left blank).
  def to_kanbanery
    [self.full_title[0,255], self.type, '', self.priority, self.full_description, self.destination_column, @@creator_email]
  end

end
|
|
|
# parse the pivotal tracker csv & assign it headers |
|
# pivotal csv is a mess of mult-lines - this parse style is based on: https://gist.github.com/894624 |
|
# parse the pivotal tracker csv & assign it headers
# pivotal csv is a mess of multi-lines - this parse style is based on: https://gist.github.com/894624
raw_stories = CSV.read(input_filename)
headers = raw_stories.shift

# Pivotal repeats the "Comment", "Task" and "Task Status" columns, so suffix
# each occurrence with its column index to make every header unique —
# otherwise the hash built below would collapse them into a single key.
# (was three identical if/elsif branches, one per header name)
duplicated_headers = ["Comment", "Task", "Task Status"]

revised_headers = headers.each_with_index.map do |header, index|
  duplicated_headers.include?(header) ? "#{header} #{index}" : header
end

# take the headers and map them to the csv lines to produce a hash
# the following is based on: http://snippets.dzone.com/posts/show/3899
string_data = raw_stories.map { |row| row.map { |cell| cell.to_s } }
array_of_hashes = string_data.map { |row| Hash[*revised_headers.zip(row).flatten] }
|
|
|
# create objects from the hash |
|
# create objects from the hash
# NOTE(review): rows are reversed — presumably so the oldest Pivotal
# stories are imported first; confirm against Kanbanery ordering.
array_of_hashes.reverse.each do |row|
  story = Story.new

  story.id          = row['Id']
  story.title       = row['Story']
  story.labels      = row['Labels']
  story.type        = row['Story Type']
  story.created_at  = row['Created at']
  story.description = row['Description']
  story.url         = row['URL']

  # On Ruby 1.9+ Hash#select returns a Hash (on 1.8 it returned an Array of
  # pairs, which is why the original flatten!/values_at dance existed — and
  # why it crashed on 1.9: Hash has no #flatten!). Hash#values yields just
  # the cell contents directly.
  comments = row.select { |k, v| k.include? 'Comment' }.values
  comments.delete_if { |c| c.nil? or c == "" }

  story.comments = comments

  # same approach for the tasks, excluding the "Task Status NN" columns
  tasks = row.select { |k, v| (k.include? 'Task') and (!k.include? 'Task Status') }.values
  tasks.delete_if { |t| t.nil? or t == "" }

  story.tasks = tasks

  stories << story
end
|
|
|
# output the kanbanery csv |
|
# write each story out as one row of the kanbanery import csv
CSV.open(output_filename, "wb") do |csv|
  stories.each { |story| csv << story.to_kanbanery }
end
# NOTE(review): the following user report was appended to the file as plain
# text (breaking parsing); preserved here as a comment.
#
# Running this under Ruby 1.9.3-p392 I get a NoMethodError: on 1.9,
# Hash#select returns a Hash rather than an Array of pairs, and Hash does
# not define #flatten!. Replacing "comments.flatten!" with
# "comments_array = comments.flatten" and fixing up references to comments
# — and doing the same for tasks — is a way around the problem (probably
# not the most elegant way, but this is the first Ruby code I've had to
# type!).