novel camera http://www.thehypertext.com/2015/12/01/novel-camera/ Tue, 01 Dec 2015 17:10:37 +0000

I have spent the last few months completing a novel I started a long time ago and turning it into a non-linear interactive experience. For my final project in several classes, I have transferred this novel into a printer-equipped camera to make a new and different type of photographic experience.


Inside the antique camera is a Raspberry Pi with a camera module behind the lens. The flow of passages is controlled by a single, handwritten JSON file. When there is overlap between the tags detected in an image by Clarifai and the tags assigned to a passage, and the candidate passage occurs next in a storyline that has already begun, that passage is printed out. If no passage can be found, the camera prints poetry enabled by a recursive context-free grammar and constructed from words detected in the image.
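
Concretely, the selection rule can be illustrated with a toy example (this mirrors the pick_tale logic in the code below, but is not part of it):

detected = set(['moon', 'night', 'street', 'tree'])  # tags Clarifai returned
passage = set(['moon', 'swamp', 'marble', 'north america', 'insect', 'street'])
score = len(passage & detected)  # 2 shared tags: 'moon' and 'street'
# A candidate passage that is also next in a storyline already underway,
# and has not been printed yet, gets +100; the highest score is printed.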


This week, I am planning to add a back-end component that will preserve the photos taken as albums and make the printed passages readable online later. For now, here is the JSON file that controls the order of output:

{
    "zero": {
        "tags": ["moon", "swamp", "marble", "north america", "insect", "street"],
        "order": 0,
        "next": ["story"]
    },
    "guam_zero": {
    	"tags": ["computer", "technology", "future", "keyboard", "politics"],
    	"order": 0,
    	"next": ["guam_one"]
    },
    "guam_one": {
    	"tags": ["computer", "technology", "future", "keyboard", "politics"],
    	"order": 1,
    	"next": []
    },
    "dream_zero": {
    	"tags": ["dream", "dark", "night", "sleep", "bed", "bedroom", "indoors"],
    	"order": 0,
    	"next": ["chess_board"]
    },
    "chess_board": {
    	"tags": ["dream", "dark", "night", "sleep", "bed", "bedroom", "indoors"],
    	"order": 2,
    	"next": ["black_queen", "black_pawn", "black_king", "black_rook", "white_king", "white_knight"]
    },
    "black_queen": {
    	"tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "queen"],
    	"order": 3,
    	"next": ["wake_up"]
    },
    "black_pawn": {
    	"tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "pawn"],
    	"order": 3,
    	"next": ["wake_up"]
    },
    "black_king": {
    	"tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "king"],
    	"order": 3,
    	"next": ["wake_up"]
    },
    "black_rook": {
    	"tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "rook", "castle"],
    	"order": 3,
    	"next": ["wake_up"]
    },
    "white_king": {
    	"tags": ["dream", "dark", "white", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "king"],
    	"order": 3,
    	"next": ["wake_up"]
    },
    "white_knight": {
    	"tags": ["dream", "dark", "white", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "knight"],
    	"order": 3,
    	"next": ["wake_up"]
    },
    "wake_up": {
    	"tags": ["dream", "dark", "night", "sleep", "bed", "bedroom", "indoors"],
    	"order": 4,
    	"next": []
    },
    "forget": {
    	"tags": ["man", "men", "boy"],
    	"order": 0,
    	"next": []
    },    
    "story": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl"],
    	"order": 1,
    	"next": ["miss_vest", "forget"]
    },
    "miss_vest": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl"],
    	"order": 2,
    	"next": ["envelope", "forget"]
    },
    "envelope": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl", "paper", "envelope", "mail"],
    	"order": 3,
    	"next": ["apartment", "forget"]
    },
    "apartment": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl", "paper", "envelope", "mail"],
    	"order": 4,
    	"next": ["email"]
    },
    "email": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "woman", "women", "girl", "paper", "envelope", "mail", "computer", "technology"],
    	"order": 5,
    	"next": ["match"]
    },
    "match": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "paper", "envelope", "mail", "computer", "technology"],
    	"order": 5,
    	"next": ["smithpoint", "morning"]
    },
    "morning": {
    	"tags": ["day", "sun", "bedroom", "bed", "breakfast", "morning", "dream", "dark", "night"],
    	"order": 6,
    	"next": ["call"]
    },
    "call": {
    	"tags": ["phone", "telephone", "technology", "computer"],
    	"order": 7,
    	"next": ["smithpoint"]
    },
    "smithpoint": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"],
    	"order": 8,
    	"next": ["drive", "forget"]
    },
    "drive": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"],
    	"order": 9,
    	"next": ["take_pill", "toss_pill"]
    },
    "take_pill": {
    	"tags": ["drug", "pill", "man", "men", "boy", "bar", "night", "drink", "alcohol", "wine", "beer"],
    	"order": 10,
    	"next": ["meet_stranger_drugs", "john_home"]
    },
    "toss_pill": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "girl", "street", "woman", "women"],
    	"order": 10,
    	"next": ["meet_stranger_no_drugs"]
    },
    "meet_stranger_drugs": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"],
    	"order": 11,
    	"next": ["john_home"]
    },
    "meet_stranger_no_drugs": {
    	"tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"],
    	"order": 11,
    	"next": ["painting"]
    },
    "painting": {
    	"tags": ["painting", "art", "moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"],
    	"order": 12,
    	"next": []
    },
    "john_home": {
    	"tags": ["drug", "pill", "man", "men", "boy", "bar", "night", "drink", "alcohol", "wine", "beer"],
    	"order": 13,
    	"next": []
    }

}

And here is the code that’s currently running on the Raspberry Pi:

import RPi.GPIO as GPIO
from Adafruit_Thermal import *
import time
import os
import sys
import json
import picamera
from clarifai.client import ClarifaiApi
from pattern.en import referenced

import gen

# Init Clarifai
os.environ["CLARIFAI_APP_ID"] = "nAT8dW6B0Oc5qA6JQfFcdIEr-CajukVSOZ6u_IsN"
os.environ["CLARIFAI_APP_SECRET"] = "BnETdY6wtp8DmXIWCBZf8nE4XNPtlHMdtK0ISNJQ"
clarifai_api = ClarifaiApi() # Assumes Env Vars Set

# Init System Paths
APP_PATH = os.path.dirname(os.path.realpath(__file__))
IMG_PATH = os.path.join(APP_PATH, 'img')
TALE_PATH = os.path.join(APP_PATH, 'tales')

# Init tale_dict
with open(os.path.join(APP_PATH, 'tales_dict.json'), 'r') as infile:
    tale_dict = json.load(infile)

# Seen tales
seen_tales = list()

# Init Camera
camera = picamera.PiCamera()

# Init Printer
printer = Adafruit_Thermal("/dev/ttyAMA0", 9600, timeout=5)
printer.boldOn()

# Init GPIO
# With camera pointed forward...
# LEFT:  11 (button), 15 (led)
# RIGHT: 13 (button), 16 (led)
GPIO.setmode(GPIO.BOARD)
ledPins = (15,16)
butPins = (11,13)

for pinNo in ledPins:
    GPIO.setup(pinNo, GPIO.OUT)

for pinNo in butPins:
    GPIO.setup(pinNo, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# Open Grammar Dict
with open(os.path.join(APP_PATH, 'weird_grammar.json'), 'r') as infile:
    grammar_dict = json.load(infile)

def blink_left_right(count):
    ledLeft, ledRight = ledPins
    for _ in range(count):
        GPIO.output(ledRight, False)
        GPIO.output(ledLeft, True)
        time.sleep(0.2)
        GPIO.output(ledRight, True)
        GPIO.output(ledLeft, False)
        time.sleep(0.2)
    GPIO.output(ledRight, False)

def to_lines(sentences):
    def sentence_to_lines(text):
        LL = 32
        tokens = text.split(' ')
        lines = list()
        curLine = list()
        charCount = 0
        for t in tokens:
            charCount += (len(t)+1)
            if charCount > LL:
                lines.append(' '.join(curLine))
                curLine = [t]
                charCount = len(t)+1
            else:
                curLine.append(t)
        lines.append(' '.join(curLine))
        return '\n'.join(lines)
    sentence_lines = map(sentence_to_lines, sentences)
    return '\n\n'.join(sentence_lines)

def open_tale(tale_name):
    with open(os.path.join(TALE_PATH, tale_name), 'r') as infile:
        tale_text = to_lines(
            filter(lambda x: x.strip(), infile.read().strip().split('\n'))
        )
    return tale_text

def pick_tale(tags, next_tales):
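    # Score each candidate (storyline openers plus whatever can come next)
    # by tag overlap with the image; unseen passages that continue the
    # current storyline get a large bonus so active stories stay coherent.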
    choice = str()
    record = 0
    for tale in tale_dict:
        if tale in next_tales or tale_dict[tale]['order'] == 0:
            score = len(set(tale_dict[tale]['tags']) & set(tags))
            if tale in next_tales and score > 0 and tale not in seen_tales:
                score += 100
            if score > record:
                choice = tale
                record = score
    return choice


blink_left_right(5)
imgCount = 1
cur_tale = str()


while True:
    inputLeft, inputRight = map(GPIO.input, butPins)
    if inputLeft != inputRight:
        try:
            img_fn = str(int(time.time()*100))+'.jpg'
            img_fp = os.path.join(IMG_PATH, img_fn)

            camera.capture(img_fp)

            blink_left_right(3)

            result = clarifai_api.tag_images(open(img_fp))
            tags = result['results'][0]['result']['tag']['classes']

            if cur_tale:
                next_tales = tale_dict[cur_tale]['next']
            else:
                next_tales = list()

            tale_name = pick_tale(tags, next_tales)
            cur_tale = tale_name

            if tale_name:
                lines_to_print = open_tale(tale_name)
                seen_tales.append(tale_name)

            else:
                grammar_dict["N"].extend(tags)

                if not inputLeft:
                    sentences = [gen.make_polar(grammar_dict, 10, sent=0) for _ in range(10)]
                elif not inputRight:
                    sentences = [gen.make_polar(grammar_dict, 10) for _ in range(10)]
                else:
                    sentences = gen.main(grammar_dict, 10)

                lines_to_print = to_lines(sentences)

            prefix = '\n\n\nNo. %i\n\n'%imgCount

            printer.println(prefix+lines_to_print+'\n\n\n')

            grammar_dict["N"] = list()
            imgCount += 1
        except:
            blink_left_right(15)
            print sys.exc_info()

    elif (not inputLeft) and (not inputRight):
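        # Both buttons held: shut down if they stay pressed for ~5 seconds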
        offCounter = 0
        for _ in range(100):
            inputLeft, inputRight = map(GPIO.input, butPins)
            if (not inputLeft) and (not inputRight):
                time.sleep(0.1)
                offCounter += 1
                if offCounter > 50:
                    os.system('sudo shutdown -h now')
            else:
                break
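
The gen module imported above isn't included in this post. As a reference, here is a minimal sketch of a recursive context-free grammar generator of the kind it implements (the grammar format is an assumption based on how grammar_dict["N"] is used above, and expand is a hypothetical helper, not gen's actual API):

import random

def expand(grammar, symbol):
    # Recursively expand a nonterminal until only terminal words remain
    if symbol not in grammar:
        return symbol
    production = random.choice(grammar[symbol])
    return ' '.join(expand(grammar, s) for s in production.split())

# e.g. expand({'S': ['the N V', 'N V'], 'N': ['moon'], 'V': ['rises']}, 'S')
# might return "the moon rises"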


Click here for a Google Drive folder with all the passages from the novel.

word.camera exhibition http://www.thehypertext.com/2015/11/24/word-camera-exhibition/ Tue, 24 Nov 2015 19:58:34 +0000

This week, I’ve been exhibiting my ongoing project, word.camera, at IDFA DocLab in Amsterdam. My installation consists of four cameras:

  1. The original word.camera physical prototype
  2. The sound camera physical prototype
  3. A new word.camera model that uses a context-free grammar to generate poems based on the images it captures
  4. A talking, pan-tilt-zoom surveillance camera that looks for faces in the hallway and then describes them aloud (see also: this Motherboard video)


During the exhibition, I was also invited to deliver two lectures.

Visitors are able to reserve the portable cameras for half-hour blocks by leaving their ID at the volunteer kiosk. I have really enjoyed watching people borrow and use my cameras.


Run-length encoding algorithm http://www.thehypertext.com/2015/10/28/run-length-encoding-algorithm/ Wed, 28 Oct 2015 01:06:58 +0000

For our first assignment in Learning Machines, Patrick asked us to implement run-length encoding in Python.

Below is my code, which includes encode and decode functions. Runs are encoded as backtick-delimited counts following the character; for example, "aaabcc" encodes to a`3`bc`2`, while single characters are left as-is.

def encode(u):
	# init tracking vars
	enc = list()
	cur = list(u[0])
	# iterate through string
	for i in range(1, len(u)):
		if u[i] == u[i-1]:
			cur.append(u[i])
		else:
			enc.append((len(cur), u[i-1]))
			cur = list(u[i])
	# handle last character
	enc.append((len(cur), cur[0]))

	def rep(tup):
		# encode using backtick as delimiter
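		# (assumes backticks never appear in the input itself)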
		count, char = tup
		if count == 1:
			return char
		else:
			return "%s`%i`" % (char, count)

	return ''.join(map(rep, enc))

def decode(e):
	# init tracking vars
	result = list()
	cur_num = list()
	# switch var
	on_num = False
	# iterate through encoded string
	for i, char in enumerate(e):
		# if delimiter found...
		if char == "`":
			# switch on/off
			on_num = not on_num
			# if closing delimeter
			if not on_num:
				result.append(
					rep_char*int(''.join(cur_num))
				)
				cur_num = list()
			# if opening delimeter
			elif on_num and i > 0:
				# repeated char is last
				# added to result
				rep_char = result.pop()
		# if not delimiter and not on number
		elif not on_num:
			result.append(char)
		# if not delimiter and on number
		else:
			cur_num.append(char)
	return ''.join(result)



if __name__ == '__main__':
	import sys
	to_encode = sys.argv[1]
	encoded = encode(to_encode)
	decoded = decode(encoded)
	print encoded
	print decoded
	assert to_encode == decoded


artificial intelligence http://www.thehypertext.com/2015/10/27/artificial-intelligence/ Tue, 27 Oct 2015 19:06:42 +0000

For my current project in Temporary Expert, I have been experimenting with artificially intelligent voice interfaces in order to build an art piece with similar functionality to the Amazon Echo, but with unexpected properties.


My robot will take the form of a benevolent computer virus. Using tools like pyautogui and Python’s webbrowser library, it will respond to user inquiries by opening documents, typing, and displaying web pages. It will also talk back to users using Apple’s text-to-speech utility.

I am building this robot using Wit.ai, a deep learning tool for making voice interfaces. Using the tool’s dashboard, I have been training my robot to respond to various user intents.

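Once Wit.ai resolves an utterance to an intent, the response side might look something like this minimal sketch (the intent names and entity keys are hypothetical placeholders, not the ones from my dashboard):

import subprocess
import webbrowser

import pyautogui  # simulates keyboard input

def speak(text):
    # Apple's built-in text-to-speech utility
    subprocess.call(['say', text])

def handle(intent, entities):
    if intent == 'open_webpage':
        speak('Opening that for you.')
        webbrowser.open(entities['url'])
    elif intent == 'type_message':
        speak('Typing now.')
        pyautogui.typewrite(entities['text'], interval=0.05)
    else:
        speak('I am not sure what you mean.')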

The core of the project will be a therapy bot similar to ELIZA, but with some additional capabilities. When this project is complete, I believe it will provide an interesting take on artificial intelligence. By using AI tools for purposes other than those they were designed for, I hope to make users question whether the tool they are using is in fact sentient and aware of their presence.

Sound Camera, Part III http://www.thehypertext.com/2015/10/21/sound-camera-part-iii/ Wed, 21 Oct 2015 22:10:27 +0000

I completed the physical prototype of the sound camera inside the enclosure I specified in my prior post, the Kodak Brownie Model 2.

I started by adding a shutter button to the top of the enclosure. I used a Cherry MX Blue mechanical keyboard switch that I had left over from a project last year.


The battery and Raspberry Pi just barely fit into the enclosure:


The Raspberry Pi camera module is wedged snugly beneath the camera’s front plate:


In addition to playing the song, I added some functionality that provides a bit of context to the user. Using the pico2wave text-to-speech utility, the camera speaks the tags aloud before playing the song. Additionally, using SoX, the camera plays an initialization tone generated from the color histogram of the image before reading the tags.

Here’s the code that’s currently running on the Raspberry Pi:

from __future__ import unicode_literals

import os
import json
import uuid
import time
from random import choice as rc
from random import sample as rs
import re
import subprocess

import RPi.GPIO as GPIO
import picamera
from clarifai.client import ClarifaiApi
import requests
from PIL import Image

import sys
import threading

import spotify

import genius_token

# SPOTIFY STUFF

# Assuming a spotify_appkey.key in the current dir
session = spotify.Session()

# Process events in the background
loop = spotify.EventLoop(session)
loop.start()

# Connect an audio sink
audio = spotify.AlsaSink(session)

# Events for coordination
logged_in = threading.Event()
logged_out = threading.Event()
end_of_track = threading.Event()

logged_out.set()


def on_connection_state_updated(session):
    if session.connection.state is spotify.ConnectionState.LOGGED_IN:
        logged_in.set()
        logged_out.clear()
    elif session.connection.state is spotify.ConnectionState.LOGGED_OUT:
        logged_in.clear()
        logged_out.set()


def on_end_of_track(self):
    end_of_track.set()

# Register event listeners
session.on(
    spotify.SessionEvent.CONNECTION_STATE_UPDATED, on_connection_state_updated)
session.on(spotify.SessionEvent.END_OF_TRACK, on_end_of_track)

# Assuming a previous login with remember_me=True and a proper logout
# session.relogin()
# session.login(genius_token.spotify_un, genius_token.spotify_pwd, remember_me=True)

# logged_in.wait()

# CAMERA STUFF

# Init Camera
camera = picamera.PiCamera()

# Init GPIO
GPIO.setmode(GPIO.BCM)

# Button Pin
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)

IMGPATH = '/home/pi/soundcamera/img/'

clarifai_api = ClarifaiApi()

def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in xrange(0, len(l), n):
        yield l[i:i+n]

def take_photo():
    fn = str(int(time.time()))+'.jpg' # TODO: Change to timestamp hash
    fp = IMGPATH+fn
    camera.capture(fp)
    return fp

def get_tags(fp):
    fileObj = open(fp)
    result = clarifai_api.tag_images(fileObj)
    resultObj = result['results'][0]
    tags = resultObj['result']['tag']['classes']
    return tags

def genius_search(tags):
    access_token = genius_token.token
    payload = {
        'q': ' '.join(tags),
        'access_token': access_token
    }
    endpt = 'http://api.genius.com/search'
    response = requests.get(endpt, params=payload)
    results = response.json()
    hits = results['response']['hits']
    
    artists_titles = []
    
    for h in hits:
        hit_result = h['result']
        if hit_result['url'].endswith('lyrics'):
            artists_titles.append(
                (hit_result['primary_artist']['name'], hit_result['title'])
            )
    
    return artists_titles

def spotify_search(query):
    endpt = "https://api.spotify.com/v1/search"
    payload = {
        'q': query,
        'type': 'track'
    }
    response = requests.get(endpt, params=payload)
    result = response.json()
    result_zero = result['tracks']['items'][0]
    
    return result_zero['uri']

def main(fn):
    tags = get_tags(fn)
    for tag_chunk in chunks(tags,3):
        artists_titles = genius_search(tag_chunk)
        for artist, title in artists_titles:
            try:
                result_uri = spotify_search(artist+' '+title)
            except IndexError:
                pass
            else:
                print tag_chunk
                byline = "%s by %s" % (title, artist)
                print byline
                to_read = ', '.join(tag_chunk) + ". " + byline
                return to_read, result_uri

def play_uri(track_uri):
    # Play a track
    # audio = spotify.AlsaSink(session)
    session.login(genius_token.spotify_un, genius_token.spotify_pwd, remember_me=True)
    logged_in.wait()
    track = session.get_track(track_uri).load()
    session.player.load(track)
    session.player.play()


def stop_track():
    session.player.play(False)
    session.player.unload()
    session.logout()
    logged_out.wait()
    audio._close()

def talk(msg):
    proc = subprocess.Popen(
        ['bash', '/home/pi/soundcamera/play_text.sh', msg]
    )
    proc.communicate()

def play_tone(freqs):
    freq1, freq2 = freqs
    proc = subprocess.Popen(
        ['play', '-n', 'synth', '0.25', 'saw', "%i-%i" % (freq1, freq2)]
    )
    proc.communicate()

def histo_tone(fp):
    im = Image.open(fp)
    hist = im.histogram()
    vals = map(sum, chunks(hist, 64)) # list of 12 values
    print vals
    map(play_tone, chunks(vals,2))

if __name__ == "__main__":
    input_state = True
    new_state = True
    hold_counter = 0
    while 1:
        input_state = GPIO.input(18)
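        # Buttons read low when pressed: fire on a fresh press, or on a
        # press detected during the previous playback loop (new_state)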
        if not (input_state and new_state):
            talk("capturing")

            # Hold for 15 seconds to turn off
            while not GPIO.input(18):
                time.sleep(0.1)
                hold_counter += 1
                if hold_counter > 150:
                    os.system('shutdown now -h')
                    sys.exit()

            # Reset hold counter
            hold_counter = 0

            # Else take photo
            try:
                img_fp = take_photo()
                msg, uri = main(img_fp)
                histo_tone(img_fp)
                talk(msg)
                play_uri(uri)
            except:
                print sys.exc_info()

            # Wait for playback to complete or Ctrl+C
            try:
                while not end_of_track.wait(0.1):
                    # If new photo, play new song
                    new_state = GPIO.input(18)
                    if not new_state:
                        stop_track()
                        # time.sleep(2)
                        break
            except KeyboardInterrupt:
                pass

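The play_text.sh script invoked by talk() isn't shown above. A minimal Python equivalent, assuming the pico2wave and aplay utilities are installed, might look like this:

import subprocess
import tempfile

def play_text(msg):
    # Render the message to a WAV file with pico2wave, then play it
    wav = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
    wav.close()
    subprocess.call(['pico2wave', '-w', wav.name, msg])
    subprocess.call(['aplay', wav.name])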

Sound Camera, Part II http://www.thehypertext.com/2015/10/06/sound-camera-part-ii/ Tue, 06 Oct 2015 02:20:44 +0000

Using JavaScript and Python Flask, I created a functional software prototype of the Sound Camera: rossgoodwin.com/soundcamera

The front-end JavaScript code is available on GitHub. Here is the primary back-end Python code:

import os
import json
import uuid
from base64 import decodestring
import time
from random import choice as rc
from random import sample as rs
import re

import PIL
from PIL import Image
import requests
import exifread

from flask import Flask, request, abort, jsonify
from flask.ext.cors import CORS
from werkzeug import secure_filename

from clarifai.client import ClarifaiApi

app = Flask(__name__)
CORS(app)

app.config['UPLOAD_FOLDER'] = '/var/www/SoundCamera/SoundCamera/static/img'
IMGPATH = '/var/www/SoundCamera/SoundCamera/static/img/'

clarifai_api = ClarifaiApi()

@app.route("/")
def index():
    return "These aren't the droids you're looking for."

@app.route("/img", methods=["POST"])
def img():
	request.get_data()
	if request.method == "POST":
		f = request.files['file']
		if f:
			filename = secure_filename(f.filename)
			f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
			new_filename = resize_image(filename)
			return jsonify(uri=main(new_filename))
		else:
			abort(501)

@app.route("/b64", methods=["POST"])
def base64():
	if request.method == "POST":
		fstring = request.form['base64str']
		filename = str(uuid.uuid4())+'.jpg'
		file_obj = open(IMGPATH+filename, 'w')
		file_obj.write(fstring.decode('base64'))
		file_obj.close()
		return jsonify(uri=main(filename))

@app.route("/url")
def url():
	img_url = request.args.get('url')
	response = requests.get(img_url, stream=True)
	orig_filename = img_url.split('/')[-1]
	if response.status_code == 200:
		with open(IMGPATH+orig_filename, 'wb') as f:
			for chunk in response.iter_content(1024):
				f.write(chunk)
		new_filename = resize_image(orig_filename)
		return jsonify(uri=main(new_filename))
	else:
		abort(500)


# def allowed_img_file(filename):
#     return '.' in filename and \
# 		filename.rsplit('.', 1)[1].lower() in set(['.jpg', '.jpeg', '.png'])

def resize_image(fn):
    longedge = 640
    orientDict = {
        1: (0, 1),
        2: (0, PIL.Image.FLIP_LEFT_RIGHT),
        3: (-180, 1),
        4: (0, PIL.Image.FLIP_TOP_BOTTOM),
        5: (-90, PIL.Image.FLIP_LEFT_RIGHT),
        6: (-90, 1),
        7: (90, PIL.Image.FLIP_LEFT_RIGHT),
        8: (90, 1)
    }

    imgOriList = []
    try:
        f = open(IMGPATH+fn, "rb")
        exifTags = exifread.process_file(f, details=False, stop_tag='Image Orientation')
        if 'Image Orientation' in exifTags:
            imgOriList.extend(exifTags['Image Orientation'].values)
    except:
        pass

    img = Image.open(IMGPATH+fn)
    w, h = img.size
    newName = str(uuid.uuid4())+'.jpeg'
    if w >= h:
        wpercent = (longedge/float(w))
        hsize = int((float(h)*float(wpercent)))
        img = img.resize((longedge,hsize), PIL.Image.ANTIALIAS)
    else:
        hpercent = (longedge/float(h))
        wsize = int((float(w)*float(hpercent)))
        img = img.resize((wsize,longedge), PIL.Image.ANTIALIAS)

    for val in imgOriList:
        if val in orientDict:
            deg, flip = orientDict[val]
            img = img.rotate(deg)
            if flip != 1:
                img = img.transpose(flip)

    img.save(IMGPATH+newName, format='JPEG')
    os.remove(IMGPATH+fn)
    
    return newName

def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in xrange(0, len(l), n):
        yield l[i:i+n]

def get_tags(fp):
    fileObj = open(fp)
    result = clarifai_api.tag_images(fileObj)
    resultObj = result['results'][0]
    tags = resultObj['result']['tag']['classes']
    return tags

def genius_search(tags):
    access_token = 'd2IuV9fGKzYEWVnzmLVtFnm-EYvBQKR8Uh3I1cfZOdr8j-BGVTPThDES532dym5a'
    payload = {
        'q': ' '.join(tags),
        'access_token': access_token
    }
    endpt = 'http://api.genius.com/search'
    response = requests.get(endpt, params=payload)
    results = response.json()
    hits = results['response']['hits']
    
    artists_titles = []
    
    for h in hits:
        hit_result = h['result']
        if hit_result['url'].endswith('lyrics'):
            artists_titles.append(
                (hit_result['primary_artist']['name'], hit_result['title'])
            )
    
    return artists_titles

def spotify_search(query):
    endpt = "https://api.spotify.com/v1/search"
    payload = {
        'q': query,
        'type': 'track'
    }
    response = requests.get(endpt, params=payload)
    result = response.json()
    result_zero = result['tracks']['items'][0]
    
    return result_zero['uri']

def main(fn):
    tags = get_tags(IMGPATH+fn)
    for tag_chunk in chunks(tags,3):
        artists_titles = genius_search(tag_chunk)
        for artist, title in artists_titles:
            try:
                result_uri = spotify_search(artist+' '+title)
            except IndexError:
                pass
            else:
                return result_uri


if __name__ == "__main__":
    app.run()


It uses the same algorithm discussed in my prior post. Now that I have had the opportunity to test it more, I am not quite satisfied with the results it is providing. First of all, they are not entirely deterministic: uploading the same photo twice can yield two different songs. Moreover, the results from a human face — which I expect to be a common use case — are not very personal. For the next steps in this project, I plan to integrate additional data, including GPS, weather, time of day, and possibly even facial expressions, in order to improve the output.

The broken cameras I ordered from eBay have arrived, and I have been considering how to use them as cases for the new models. I also purchased a GPS module for my Raspberry Pi, so the next Sound Camera prototype, with new features integrated, will likely be a physical version. I’m planning to use a Kodak Brownie camera (c. 1916).


So it goes. http://www.thehypertext.com/2015/10/06/so-it-goes/ Tue, 06 Oct 2015 01:02:23 +0000

Kurt Vonnegut once gave a brief, delightful lecture on the shapes of stories.

This was the primary inspiration for my latest project, which features Kurt Vonnegut’s complete works, analyzed for sentiment, and visualized as interactive word clouds. I developed it entirely in front-end JavaScript, and it’s currently hosted on GitHub pages: rossgoodwin.com/vonnegut


Users can scrub through the sentiment graph of each book from start to finish and see a word cloud displayed for each position on the slider. Each word cloud represents 10 paragraphs of the book. Along with the rises and dips in the graph, sentiment values are indicated by the color of the word cloud text, which ranges from dark green (highly positive) to bright red (highly negative).

Rather than simply using word count or frequency for the size of the words, I used TF-IDF scores. (Each 10 paragraph block was treated as one document, and each book was treated as an independent set of documents.) As a result, the largest words in each word cloud are those that make their respective section unique in the context of the entire book.

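For reference, here is a minimal sketch of the TF-IDF computation as described, treating each 10-paragraph block as one document and the book as the document set (an illustration, not the notebook code):

import math
from collections import Counter

def tfidf_per_block(blocks):
    # blocks: one token list per 10-paragraph segment of a book
    doc_freq = Counter()
    for block in blocks:
        doc_freq.update(set(block))
    scores = []
    for block in blocks:
        counts = Counter(block)
        scores.append(dict(
            (w, (counts[w] / float(len(block))) *
                math.log(len(blocks) / float(doc_freq[w])))
            for w in counts
        ))
    return scores
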
The first steps in creating this project were to parse Vonnegut’s books, perform TF-IDF calculations for each word and sentiment analysis for each 10-paragraph segment, then store the resulting data in a set of JSON files. I completed these steps in a set of iPython Notebooks.

Once I had the JSON files, I used D3 to create the word clouds and Chart.js to create the line graphs. The sliders are HTML range inputs, modified with custom CSS. I wanted to create the appearance of long, semi-transparent planchettes sliding over the graphs. Getting the sliders to line up with the graphs precisely was particularly challenging, as was providing the option to click on the graphs in any location and automatically move the sliders to that location.

Here is my JavaScript code, in its current state:

(function() {

Number.prototype.map = function (in_min, in_max, out_min, out_max) {
  return (this - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;
}

function titleCase(str) {
    return str.replace(/\w\S*/g, function(txt){return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();});
}

// Charts.js global config
Chart.defaults.global.animation = false;
Chart.defaults.global.tooltipEvents = [];
Chart.defaults.global.scaleFontFamily = "'Cousine', monospace";
Chart.defaults.global.showScale = false;

// var spectrum = ['#F22613', '#E74C3C', '#D35400', '#F2784B', '#95A5A6', '#68C3A3', '#4DAF7C', '#3FC380', '#2ECC71'];
var spectrum = ["#f22613", "#f25749", "#f28379", "#f2b0aa", "#95a5a6", "#add9c2", "#74b391", "#45996c", "#1e824c"];


$("#key-block").append(
  '<div id=\"key-text-box\"><p class=\"text-center lead small\" style=\"margin-left: 7px;\">&lt;&lt;&lt; negative | positive &gt;&gt;&gt;</p></div>'
);

spectrum.map(function(hex){
  $("#key-block").append(
    '<div class=\"key-color\" style=\"background-color:'+hex+';\"></div>'
  );
});

function updateCloud(bookslug, section) {

  $.getJSON("data/vonnegut-"+section+".json", function(data){

    // var factor = Math.pow(data[bookslug]['tfidf'].length, 2);

    var layout = d3.layout.cloud()
        .size([800, 500])
        .words(data[bookslug]['tfidf'].map(function(d) {
          return {text: d[0], size: d[1] * 500};
        }))
        .padding(3)
        .rotate(function() { return 0; }) // return ~~(Math.random() * 2) * 90
        .font("Cousine")
        .fontSize(function(d) { return d.size; })
        .on("end", draw);
    layout.start();

    function draw(words) {

      var overallContainer = d3.select("#"+bookslug);

      overallContainer.select("svg").remove();
      overallContainer.select("a").remove();

      var svgContainer = overallContainer.append("svg")
          .attr("width", layout.size()[0])
          .attr("height", layout.size()[1])
          .attr("class", "svg-cont");

      var wordCloud = svgContainer.append("g")
          .attr("transform", "translate(" + layout.size()[0] / 2 + "," + layout.size()[1] / 2 + ")")
        .selectAll("text")
          .data(words)
        .enter().append("text")
          .transition().duration(500)
          .style("font-size", function(d) { return d.size + "px"; })
          .style("font-family", "Cousine")
          .style("fill", function(d, i) {
              var sentiment = data[bookslug]['sentiment'];
              // clamp so sentiment == 1 doesn't index past the end
              var ix = Math.min(spectrum.length - 1, Math.floor(((sentiment + 1)/2)*spectrum.length));
              return spectrum[ix];
          })
          .attr("text-anchor", "middle")
          .attr("transform", function(d) {
            return "translate(" + [d.x, d.y] + ")rotate(" + d.rotate + ")";
          })
          .text(function(d) { return d.text; });

      var title = titleCase(data[bookslug]['title']);

      var labelText = overallContainer
                      .append("a")
                      .attr("href", "http://www.amazon.com/exec/obidos/external-search/?field-keywords=%s"+title+"&mode=blended")
                      .attr("class", "twitter-link")
                      .attr("target", "_blank")
                      .text(title);

      overallContainer.transition()
          .style("opacity", 1.0)
          .delay(1000)
          .duration(3000);
    }

  });

}

$.getJSON("data/sentiment.json", function(sent){
$.getJSON("data/vonnegut-0.json", function(data){
  $("#loadinggif").fadeOut("slow");
  Object.keys(data).sort().map(function(slug){
    $("#vis").append(
      '<div id=\"'+slug+'\" class=\"col-md-12 transparent text-center\"></div>'
    );

    $("#"+slug).append(
      '<canvas class="chart-canvas" id=\"'+slug+'-chart\" width=\"800\" height=\"150\"></canvas>'
    );

    var ctx = document.getElementById(slug+"-chart").getContext("2d");

    var xLabels = [];

    for (var i=0;i<data[slug]['length'];i++) {
      xLabels.push('');
    }

    var chartData = {
        labels: xLabels,
        datasets: [
            {
                label: titleCase(data[slug]['title']),
                fillColor: "rgba(210, 215, 211, 0.7)",
                strokeColor: "rgba(189, 195, 199, 1)",
                pointColor: "rgba(210, 215, 211, 1)",
                pointStrokeColor: "#fff",
                pointHighlightFill: "#fff",
                pointHighlightStroke: "rgba(220,220,220,1)",
                data: sent[slug]
            }
        ]
    };

    var chartOptions = {
      pointDot : false,
      pointHitDetectionRadius : 5,
      scaleShowVerticalLines: false,
      bezierCurve: false
    };

    var myNewChart = new Chart(ctx).Line(chartData, chartOptions);

    var stepCount = data[slug]['length'] - 1;

    $("#"+slug).append(
      '<div class=\"scrubber\"><input id=\"'+slug+'-scrub\" type=\"range\" min=\"0\" max=\"'+stepCount+'\" value=\"0\" step=\"1\"></div>'
    );

    $("#"+slug+"-chart").on("click", function(evt){
      var activePoints = myNewChart.getPointsAtEvent(evt);
      var xPos = activePoints[Math.floor(activePoints.length/2)].x;
      var ix = Math.floor(xPos.map(0, 800, 0, data[slug]['length']));
      console.log(xPos);
      console.log(ix);
      $('#'+slug+'-scrub').val(ix);
      updateCloud(slug, ix);
    });

    // Play Button
    $('#'+slug).append(
      '<button type=\"button\" id=\"'+slug+'-btn\" class=\"btn btn-default btn-xs play-btn\" aria-label=\"Play\"><span class=\"glyphicon glyphicon-play\" aria-hidden=\"true\"></span></button>'
    );

    $('#'+slug).append(
      '<button type=\"button\" id=\"'+slug+'-btn-pause\" class=\"btn btn-default btn-xs play-btn\" aria-label=\"Pause\"><span class=\"glyphicon glyphicon-pause\" aria-hidden=\"true\"></span></button>'
    );

    // Load First Clouds
    updateCloud(slug, 0);

    var play;

    $('#'+slug+'-btn').click(function(){

      console.log('clicked ' + slug);
      autoAdvance();
      play = setInterval(function(){
        autoAdvance();
      }, 5000);

      function autoAdvance(){
          var scrubVal = $('#'+slug+'-scrub').val();
          console.log(data[slug]['length']);
          if (scrubVal >= data[slug]['length']-1) {
            console.log("EOR");
            clearInterval(play);
            return;
          }
          console.log(scrubVal);
          var newVal = parseInt(scrubVal, 10) + 1;
          $('#'+slug+'-scrub').val(newVal);
          updateCloud(slug, newVal);
      }

    });



    $('#'+slug+'-btn-pause').click(function(){
      clearInterval(play);
    });


    $("#"+slug+"-scrub").on("input", function(){
      var sectNo = $(this).val();
      console.log(sectNo);
      updateCloud(slug, sectNo);
    });
  });
});
});



})();


The rest of my front-end code can be found on GitHub.

Sound Camera http://www.thehypertext.com/2015/09/14/sound-camera/ Mon, 14 Sep 2015 04:06:42 +0000

I have been looking for ways to push the conceptual framework behind word.camera to another domain. This week, I have been prototyping a script that chooses music based on photographs. Ideally, the end result will be a wearable camera / music player that selects tracks for you based on your environment. Unfortunately, the domain sound.camera has been claimed, but I’m still planning to use the name “Sound Camera” for this project.


My code: iPython Notebook

The script I wrote gets concept words from the image via Clarifai, then searches song lyrics for those words on Genius, then finds the song on Spotify. Below are some images I put through the algorithm. You can click on each one to hear the song that resulted, though you will need to log in to Spotify to do so.
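
Condensed, that chain looks something like the sketch below (illustrative only: the tag-chunking, retries, and error handling from the full notebook are omitted, and GENIUS_TOKEN stands in for a real API token):

import requests
from clarifai.client import ClarifaiApi

GENIUS_TOKEN = '...'  # placeholder; use a real Genius API token

def photo_to_song(image_path):
    # 1. Concept words from the image via Clarifai
    api = ClarifaiApi()  # assumes CLARIFAI_APP_ID/_APP_SECRET are set
    result = api.tag_images(open(image_path))
    tags = result['results'][0]['result']['tag']['classes'][:3]

    # 2. Search Genius for lyrics containing those words
    hits = requests.get('http://api.genius.com/search', params={
        'q': ' '.join(tags), 'access_token': GENIUS_TOKEN
    }).json()['response']['hits']
    artist = hits[0]['result']['primary_artist']['name']
    title = hits[0]['result']['title']

    # 3. Find the track on Spotify
    items = requests.get('https://api.spotify.com/v1/search', params={
        'q': artist + ' ' + title, 'type': 'track'
    }).json()['tracks']['items']
    return items[0]['uri']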

[linked photos: putin, street, landscape, cat]

The next step will be to get this code working on a Raspberry Pi inside one of the film camera bodies I just received via eBay.

Author Cameras http://www.thehypertext.com/2015/09/09/author-cameras/ Wed, 09 Sep 2015 19:58:10 +0000

For my primary project in Project Development Studio with Stefani Bardin, I am planning to make 3-5 more physical word cameras. These models will iterate on my prior physical word camera by printing relevant passages from specific authors, based on convolutional neural network analysis of captured images.

I have not yet chosen the authors I plan to embed in these cameras, or how I plan to present the extracted text. I also have tentative plans for a new iteration of the talking surveillance camera I developed last semester, but more on that in future posts.

This week, I spent some time on eBay finding a few broken medium- and large-format cameras to use as cases, at $5 to $25 each.


I am currently waiting to receive them so that I can start planning the builds. Below is a list of the additional parts that will be required for each camera:

Raspberry Pi 2 ($40)
85.60mm x 56mm x 21mm (or roughly 3.37″ x 2.21″ x 0.83″)

Raspberry Pi Camera Board ($30)
25mm x 20mm x 9mm

Buck Converter ($10)
51mm x 26.3mm x 14mm (L x W x H)

7.4V LiIon Battery Pack ($90)
22mm (0.9″) x 104mm (4.1″) x 107mm (4.2″)
OR two USB batteries ($40)

Thermal Printer ($25 from China or $50 from U.S.)
~4 1/8″ (105mm) x 2 1/4″ (58mm) for rectangular hole
~58mm deep

On/Off Switch ($1)
18.60mm x 12.40mm rectangular hole
13.9mm deep

LED Button ($5)
Shutter button, user will hold for 3 seconds to turn off Raspberry Pi
16mm round hole
~1.5″ deep

1/4-size permaproto board ($3)

1/4″ Acrylic ($12) or Broken Medium Format TLR ($30-69)

Jumper Wires ($2)

Summer Projects http://www.thehypertext.com/2015/09/09/summer-projects/ Wed, 09 Sep 2015 04:16:21 +0000

This summer, I wrote a TF-IDF clustering library for my internship at Ufora, and I’m currently working on a long-term project for Fusion to track and analyze online data from candidates in the 2016 US Presidential Election.

Needless to say, those two projects have kept me busy, but I also built a few random things for fun in my spare time. At this point, the total number of things I’ve built has become too cumbersome for an image-oriented portfolio, so I switched to a Google Spreadsheet-powered method, one trusted by Darius Kazemi and (as I’ve learned from my Fusion co-conspirator Daniel McLaughlin) countless data hackers at news and media organizations across the country, whose routine CMS systems may require knowledge of COBOL, or worse, Microsoft Sharepoint.

The personal projects I completed over the summer were:

  • MeterMap | Maps clauses from a text corpus onto the metrical structure of a poem.
  • itpbot | IRC bot for the #itp channel on irc.freenode.com
  • PaletteKnife | Tool that extracts color palettes from photographs, created entirely in front-end JavaScript using p5.js
  • Dick Fractal | Personal website of Richard “Dick” Fractal, Ph.D.
  • Four Oh Four | A URLae
  • @BizarroMOMA | Twitter bot tweeting fictional artworks generated from the Museum of Modern Art’s collections data

Certain folks (namely, my parents) have complained to me for a long time that my website does not contain very much information about what I actually make. For their sake, and my own sanity, I added a list of projects below the introductory letter. I did this by writing a Python script that parses a Google Spreadsheets-generated CSV into a JSON file — I plan to implement the script in JavaScript soon, so that my website updates automatically.

We were also supposed to dig into some JSON for Designing for Data Personalization with Sam Slover, so this task fulfilled multiple roles for me.

Here’s the Python script:

import csv
import json
import datetime
import requests

CSV_URL = ""

response = requests.get(CSV_URL)

with open('projects.csv', 'w') as outfile:
	outfile.write(response.text)

fileObj = open('projects.csv', 'r')

reader = csv.DictReader(fileObj)

projList = []

for row in reader:
	projList.append(row)

fileObj.close()

def get_date(projObj):
	m, d, y = map(int, projObj['Date of Completion'].split('/'))
	dateObj = datetime.datetime(y, m, d, 0, 0)
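	# negate the Unix timestamp so the most recent projects sort first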
	return -(dateObj - datetime.datetime(1970,1,1)).total_seconds()

projList = sorted(projList, key=get_date)


with open('projects.json', 'w') as outfile:
	json.dump(projList, outfile)

And here’s the raw JSON:

[
   {
      "Code URL":"https://github.com/rossgoodwin/itpbot",
      "XS S M L XL":"M",
      "Name":"itpbot",
      "Timestamp":"8/16/2015 12:45:21",
      "Documentation URL":"",
      "Date of Completion":"8/14/2015",
      "Main URL":"http://rossgoodwin.com/itpbot",
      "Output URL":"",
      "Description":"IRC bot for the #itp channel on irc.freenode.com"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/paletteknife",
      "XS S M L XL":"S",
      "Name":"Palette Knife",
      "Timestamp":"8/16/2015 1:00:47",
      "Documentation URL":"",
      "Date of Completion":"7/30/2015",
      "Main URL":"http://rossgoodwin.com/paletteknife/",
      "Output URL":"",
      "Description":"Tool that extracts color palettes from photographs, created entirely in front-end JavaScript using p5.js"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/bizarromoma",
      "XS S M L XL":"XS",
      "Name":"@BizarroMoMA",
      "Timestamp":"8/16/2015 12:05:27",
      "Documentation URL":"",
      "Date of Completion":"7/23/2015",
      "Main URL":"https://twitter.com/bizarromoma",
      "Output URL":"",
      "Description":"Twitter bot tweeting fictional artworks generated from the Museum of Modern Art's collections data"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/metermap",
      "XS S M L XL":"M",
      "Name":"MeterMap",
      "Timestamp":"8/17/2015 18:28:55",
      "Documentation URL":"",
      "Date of Completion":"7/21/2015",
      "Main URL":"http://rossgoodwin.com/metermap/",
      "Output URL":"http://rossgoodwin.com/faulkner_wasteland.txt",
      "Description":"Maps clauses from a text corpus onto the metrical structure of a poem."
   },
   {
      "Code URL":"https://github.com/rossgoodwin/tweetingpoints/tree/gh-pages",
      "XS S M L XL":"S",
      "Name":"Tweeting Points",
      "Timestamp":"8/17/2015 18:40:42",
      "Documentation URL":"",
      "Date of Completion":"7/21/2015",
      "Main URL":"http://rossgoodwin.com/tweetingpoints/",
      "Output URL":"",
      "Description":"Real-time word clouds of the most recent tweets from candidates in the 2016 U.S. Presidential Election"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/dickfractal",
      "XS S M L XL":"S",
      "Name":"Dick Fractal",
      "Timestamp":"8/16/2015 12:18:28",
      "Documentation URL":"",
      "Date of Completion":"7/5/2015",
      "Main URL":"http://dickfractal.com",
      "Output URL":"",
      "Description":"Dick Fractal, Ph.D."
   },
   {
      "Code URL":"https://github.com/rossgoodwin/fourohfour",
      "XS S M L XL":"S",
      "Name":"Four Oh Four",
      "Timestamp":"8/16/2015 12:22:14",
      "Documentation URL":"",
      "Date of Completion":"6/18/2015",
      "Main URL":"http://rossgoodwin.com/fourohfour.pdf",
      "Output URL":"",
      "Description":"A URLae is a poetic form consisting of a list of URLs. Four Oh Four is a URLae composed entirely of generated links."
   },
   {
      "Code URL":"",
      "XS S M L XL":"XL",
      "Name":"The Traveler's Lamp",
      "Timestamp":"8/17/2015 18:35:21",
      "Documentation URL":"http://www.thehypertext.com/tag/travelers-lamp/",
      "Date of Completion":"5/8/2015",
      "Main URL":"http://www.thehypertext.com/2015/05/08/travelers-lamp-part-ii/",
      "Output URL":"",
      "Description":"3D-printed cities inspired by those from Invisible Cities by Italo Calvino, which light up in sequence as a computer solves the Traveling Salesman Problem for the distances between them"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/photosynthesis",
      "XS S M L XL":"XL",
      "Name":"word.camera",
      "Timestamp":"8/16/2015 12:10:53",
      "Documentation URL":"http://www.thehypertext.com/tag/word-camera/",
      "Date of Completion":"4/11/2015",
      "Main URL":"https://word.camera",
      "Output URL":"https://word.camera/i/nxDYEXM9R",
      "Description":"As artificial intelligence technology changes our world, it grants us creative possibilities not previously thought possible. word.camera explores one particular AI technology, convolutional neural networks that can generate descriptive words from images, and realizes possibilities for a new type of camera and a new type of photography."
   },
   {
      "Code URL":"https://github.com/rossgoodwin/wraithhimself",
      "XS S M L XL":"XS",
      "Name":"@WraithHimself",
      "Timestamp":"8/16/2015 21:45:54",
      "Documentation URL":"",
      "Date of Completion":"3/9/2015",
      "Main URL":"https://twitter.com/WraithHimself",
      "Output URL":"",
      "Description":"Twitter bot tweeting the entirely of Infinite Jest by David Foster Wallace, one word at a time, as the first word of retweets"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/gutenflag",
      "XS S M L XL":"M",
      "Name":"@GutenFlag",
      "Timestamp":"8/16/2015 21:44:05",
      "Documentation URL":"http://www.thehypertext.com/2015/03/10/gutenflag/",
      "Date of Completion":"3/7/2015",
      "Main URL":"https://twitter.com/GutenFlag",
      "Output URL":"",
      "Description":"Twitter bot that recommends Project Gutenberg ebooks based on topics extracted from users' most recent tweets"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/drgonzo",
      "XS S M L XL":"M",
      "Name":"Dr. Gonzo",
      "Timestamp":"8/17/2015 19:00:17",
      "Documentation URL":"",
      "Date of Completion":"2/17/2015",
      "Main URL":"http://www.thehypertext.com/2015/02/19/dr-gonzo/",
      "Output URL":"",
      "Description":"Hunter S. Thompson therapist chat bot"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/clock/tree/gh-pages",
      "XS S M L XL":"S",
      "Name":"Text Clock",
      "Timestamp":"8/16/2015 12:29:06",
      "Documentation URL":"http://www.thehypertext.com/2015/04/11/text-clock/",
      "Date of Completion":"12/25/2014",
      "Main URL":"http://rossgoodwin.com/clock/",
      "Output URL":"",
      "Description":"The complete Project Gutenberg corpus as a clock"
   },
   {
      "Code URL":"",
      "XS S M L XL":"XS",
      "Name":"Cut-Up",
      "Timestamp":"8/16/2015 21:48:55",
      "Documentation URL":"",
      "Date of Completion":"12/15/2014",
      "Main URL":"https://github.com/rossgoodwin/cutup",
      "Output URL":"",
      "Description":"The Cut-Up Method of Brion Gysin, as described by William S. Burroughs, implemented in Python"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/mtg",
      "XS S M L XL":"XL",
      "Name":"The Mechanical Turk's Ghost",
      "Timestamp":"8/16/2015 22:06:09",
      "Documentation URL":"http://www.thehypertext.com/tag/chess/",
      "Date of Completion":"12/8/2014",
      "Main URL":"http://www.thehypertext.com/2015/01/05/the-mechanical-turks-ghost-part-v/",
      "Output URL":"",
      "Description":"Chessboard that generates music based on computational analysis of a game in real time, and shakes pieces off the board when it determines either player is within range of checkmate"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/ficgen",
      "XS S M L XL":"L",
      "Name":"Fiction Generator",
      "Timestamp":"8/16/2015 12:34:49",
      "Documentation URL":"http://www.thehypertext.com/tag/fiction-generator/",
      "Date of Completion":"12/3/2014",
      "Main URL":"http://fictiongenerator.com",
      "Output URL":"http://rossgoodwin.com/tricks_of_the_trade.pdf",
      "Description":"Generates novels according to user-defined parameters using content scraped from gutenberg.org, tvtropes.org, scp-wiki.net, and erowid.org."
   },
   {
      "Code URL":"https://github.com/rossgoodwin/typingtutor",
      "XS S M L XL":"L",
      "Name":"Stenogloves",
      "Timestamp":"8/17/2015 18:45:32",
      "Documentation URL":"http://www.thehypertext.com/tag/keyboard/",
      "Date of Completion":"11/25/2014",
      "Main URL":"http://www.thehypertext.com/2014/12/09/stenogloves-part-iii/",
      "Output URL":"",
      "Description":"10-key chorded keyboard with alternative typing scheme"
   },
   {
      "Code URL":"",
      "XS S M L XL":"L",
      "Name":"che55",
      "Timestamp":"8/17/2015 18:04:52",
      "Documentation URL":"http://www.thehypertext.com/2014/09/29/general-update/",
      "Date of Completion":"11/2/2014",
      "Main URL":"https://github.com/rossgoodwin/che55",
      "Output URL":"",
      "Description":"Chess GUI and visualizer, written in Processing"
   },
   {
      "Code URL":"https://github.com/dothething/maze",
      "XS S M L XL":"M",
      "Name":"Scary Maze Game",
      "Timestamp":"8/17/2015 18:50:33",
      "Documentation URL":"",
      "Date of Completion":"10/13/2014",
      "Main URL":"http://www.thehypertext.com/2014/10/25/scary-maze-game/",
      "Output URL":"",
      "Description":"A biofeedback game that gets harder when you get scared (with Jerllin Chang and Changyeon Lee)"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/poetizer",
      "XS S M L XL":"L",
      "Name":"Poetizer",
      "Timestamp":"8/17/2015 18:57:00",
      "Documentation URL":"",
      "Date of Completion":"8/20/2014",
      "Main URL":"http://www.thehypertext.com/2014/08/31/poetizer/",
      "Output URL":"http://www.thehypertext.com/2014/09/02/more-poetizer-output/",
      "Description":"Generates free or fixed verse poetry from any text corpus"
   },
   {
      "Code URL":"https://github.com/rossgoodwin/hmap",
      "XS S M L XL":"S",
      "Name":"hmap",
      "Timestamp":"8/17/2015 18:15:16",
      "Documentation URL":"",
      "Date of Completion":"6/12/2014",
      "Main URL":"http://rossgoodwin.com/hmap/",
      "Output URL":"",
      "Description":"Image histogram remapping tool; maps the colors from one image onto another image (created with Anthony Kesich)"
   }
]

And here’s the JavaScript that parses the JSON to my website:

(function(){

$.getJSON("projects.json", function(data){

    $.each(data, function(ix, obj) {

        if (obj['Documentation URL'] != '') {
            var docChunk = ' <a target=\"_blank\" class="btn btn-xs btn-default" href=\"' + obj['Documentation URL'] + '\">documentation</a>';
        } else {
            var docChunk = '';
        }

        if (obj['Output URL'] != '') {
            var outChunk = ' <a target=\"_blank\" class="btn btn-xs btn-default" href=\"' + obj['Output URL'] + '\">output</a>';
        } else {
            var outChunk = '';
        }

        if (obj['Code URL'] != '') {
            var codeChunk = ' <a target=\"_blank\" class="btn btn-xs btn-default" href=\"' + obj['Code URL'] + '\">code</a>';
        } else {
            var codeChunk = '';
        }

        $('#'+obj['XS S M L XL']).append(
            '<p class="project-desc"><a target=\"_blank\" class="btn btn-xs btn-danger" href=\"'+obj['Main URL']+'\">'+obj['Name']+'</a>'+ docChunk + outChunk + codeChunk + '<br>' + obj['Description']+'</p>'
        );

    });

});

})();
