Inside the antique camera is a Raspberry Pi with a camera module behind the lens. The flow of passages is controlled by a single, handwritten JSON file. When there is overlap between the tags detected in an image by Clarifai and the tags assigned to a passage, and the candidate passage occurs next in a storyline that has already begun, that passage is printed out. If no passage can be found, the camera prints poetry enabled by a recursive context-free grammar and constructed from words detected in the image.
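The grammar expansion itself lives in a separate gen module that isn't reproduced in this post. As a rough illustration of the idea, here is a minimal sketch of recursive grammar expansion with the detected tags folded in as nouns; the rules and tags below are invented for the example and are not the camera's actual grammar:

# Minimal sketch (not the actual gen module): expand a context-free grammar
# stored as a dict of production rules, after appending detected tags as nouns.
import random

grammar = {
    "S":  [["NP", "VP"]],
    "NP": [["the", "N"], ["the", "A", "N"]],
    "VP": [["V", "NP"]],
    "A":  [["hollow"], ["electric"]],
    "V":  [["remembers"], ["devours"]],
    "N":  [["moon"]],  # image tags from Clarifai get appended here
}

def expand(symbol):
    """Recursively expand a symbol until only terminal words remain."""
    if symbol not in grammar:
        return [symbol]
    production = random.choice(grammar[symbol])
    words = []
    for token in production:
        words.extend(expand(token))
    return words

tags = ["swamp", "insect", "street"]      # hypothetical Clarifai output
grammar["N"].extend([[t] for t in tags])  # fold detected words into the grammar
print(" ".join(expand("S")))              # e.g. "the swamp devours the electric insect"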
This week, I am planning to add a back-end component that will allow photos taken to be preserved as albums, and passages printed to be read later online. For now, here is the JSON file that controls the order of output:
{ "zero": { "tags": ["moon", "swamp", "marble", "north america", "insect", "street"], "order": 0, "next": ["story"] }, "guam_zero": { "tags": ["computer", "technology", "future", "keyboard", "politics"], "order": 0, "next": ["guam_one"] }, "guam_one": { "tags": ["computer", "technology", "future", "keyboard", "politics"], "order": 1, "next": [] }, "dream_zero": { "tags": ["dream", "dark", "night", "sleep", "bed", "bedroom", "indoors"], "order": 0, "next": ["chess_board"] }, "chess_board": { "tags": ["dream", "dark", "night", "sleep", "bed", "bedroom", "indoors"], "order": 2, "next": ["black_queen", "black_pawn", "black_king", "black_rook", "white_king", "white_knight"] }, "black_queen": { "tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "queen"], "order": 3, "next": ["wake_up"] }, "black_pawn": { "tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "pawn"], "order": 3, "next": ["wake_up"] }, "black_king": { "tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "king"], "order": 3, "next": ["wake_up"] }, "black_rook": { "tags": ["dream", "dark", "black", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "rook", "castle"], "order": 3, "next": ["wake_up"] }, "white_king": { "tags": ["dream", "dark", "white", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "king"], "order": 3, "next": ["wake_up"] }, "white_knight": { "tags": ["dream", "dark", "white", "night", "sleep", "bed", "bedroom", "indoors", "chess", "game", "knight"], "order": 3, "next": ["wake_up"] }, "wake_up": { "tags": ["dream", "dark", "night", "sleep", "bed", "bedroom", "indoors"], "order": 4, "next": [] }, "forget": { "tags": ["man", "men", "boy"], "order": 0, "next": [] }, "story": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl"], "order": 1, "next": ["miss_vest", "forget"] }, "miss_vest": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl"], "order": 2, "next": ["envelope", "forget"] }, "envelope": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl", "paper", "envelope", "mail"], "order": 3, "next": ["apartment", "forget"] }, "apartment": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "street", "woman", "women", "girl", "paper", "envelope", "mail"], "order": 4, "next": ["email"] }, "email": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "woman", "women", "girl", "paper", "envelope", "mail", "computer", "technology"], "order": 5, "next": ["match"] }, "match": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "paper", "envelope", "mail", "computer", "technology"], "order": 5, "next": ["smithpoint", "morning"] }, "morning": { "tags": ["day", "sun", "bedroom", "bed", "breakfast", "morning", "dream", "dark", "night"], "order": 6, "next": ["call"] }, "call": { "tags": ["phone", "telephone", "technology", "computer"], "order": 7, "next": ["smithpoint"] }, "smithpoint": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"], "order": 8, "next": ["drive", "forget"] }, "drive": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"], 
"order": 9, "next": ["take_pill", "toss_pill"] }, "take_pill": { "tags": ["drug", "pill", "man", "men", "boy", "bar", "night", "drink", "alcohol", "wine", "beer"], "order": 10, "next": ["meet_stranger_drugs", "john_home"] }, "toss_pill": { "tags": ["moon", "swamp", "marble", "north america", "insect", "girl", "street", "woman", "women"], "order": 10, "next": ["meet_stranger_no_drugs"] }, "meet_stranger_drugs": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"], "order": 11, "next": ["john_home"] }, "meet_stranger_no_drugs": { "tags": ["moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"], "order": 11, "next": ["painting"] }, "painting": { "tags": ["painting", "art", "moon", "swamp", "marble", "north america", "insect", "night", "man", "men", "boy", "bar", "drink", "alcohol", "wine", "beer"], "order": 12, "next": [] }, "john_home": { "tags": ["drug", "pill", "man", "men", "boy", "bar", "night", "drink", "alcohol", "wine", "beer"], "order": 13, "next": [] } }
And here is the code that’s currently running on the Raspberry Pi:
import RPi.GPIO as GPIO
from Adafruit_Thermal import *
import time
import os
import sys
import json
import picamera
from clarifai.client import ClarifaiApi
from pattern.en import referenced

import gen

# Init Clarifai
os.environ["CLARIFAI_APP_ID"] = "nAT8dW6B0Oc5qA6JQfFcdIEr-CajukVSOZ6u_IsN"
os.environ["CLARIFAI_APP_SECRET"] = "BnETdY6wtp8DmXIWCBZf8nE4XNPtlHMdtK0ISNJQ"
clarifai_api = ClarifaiApi()  # Assumes Env Vars Set

# Init System Paths
APP_PATH = os.path.dirname(os.path.realpath(__file__))
IMG_PATH = os.path.join(APP_PATH, 'img')
TALE_PATH = os.path.join(APP_PATH, 'tales')

# Init tale_dict
with open(os.path.join(APP_PATH, 'tales_dict.json'), 'r') as infile:
    tale_dict = json.load(infile)

# Seen tales
seen_tales = list()

# Init Camera
camera = picamera.PiCamera()

# Init Printer
printer = Adafruit_Thermal("/dev/ttyAMA0", 9600, timeout=5)
printer.boldOn()

# Init GPIO
# With camera pointed forward...
# LEFT: 11 (button), 15 (led)
# RIGHT: 13 (button), 16 (led)
GPIO.setmode(GPIO.BOARD)
ledPins = (15, 16)
butPins = (11, 13)

for pinNo in ledPins:
    GPIO.setup(pinNo, GPIO.OUT)

for pinNo in butPins:
    GPIO.setup(pinNo, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# Open Grammar Dict
with open(os.path.join(APP_PATH, 'weird_grammar.json'), 'r') as infile:
    grammar_dict = json.load(infile)


def blink_left_right(count):
    ledLeft, ledRight = ledPins
    for _ in range(count):
        GPIO.output(ledRight, False)
        GPIO.output(ledLeft, True)
        time.sleep(0.2)
        GPIO.output(ledRight, True)
        GPIO.output(ledLeft, False)
        time.sleep(0.2)
    GPIO.output(ledRight, False)


def to_lines(sentences):
    def sentence_to_lines(text):
        LL = 32
        tokens = text.split(' ')
        lines = list()
        curLine = list()
        charCount = 0
        for t in tokens:
            charCount += (len(t)+1)
            if charCount > LL:
                lines.append(' '.join(curLine))
                curLine = [t]
                charCount = len(t)+1
            else:
                curLine.append(t)
        lines.append(' '.join(curLine))
        return '\n'.join(lines)
    sentence_lines = map(sentence_to_lines, sentences)
    return '\n\n'.join(sentence_lines)


def open_tale(tale_name):
    with open(os.path.join(TALE_PATH, tale_name), 'r') as infile:
        tale_text = to_lines(
            filter(lambda x: x.strip(), infile.read().strip().split('\n'))
        )
    return tale_text


def pick_tale(tags, next_tales):
    choice = str()
    record = 0
    for tale in tale_dict:
        if tale in next_tales or tale_dict[tale]['order'] == 0:
            score = len(set(tale_dict[tale]['tags']) & set(tags))
            if tale in next_tales and score > 0 and not tale in seen_tales:
                score += 100
            if score > record:
                choice = tale
                record = score
    return choice


blink_left_right(5)
imgCount = 1
cur_tale = str()

while True:
    inputLeft, inputRight = map(GPIO.input, butPins)
    if inputLeft != inputRight:
        try:
            img_fn = str(int(time.time()*100))+'.jpg'
            img_fp = os.path.join(IMG_PATH, img_fn)
            camera.capture(img_fp)
            blink_left_right(3)
            result = clarifai_api.tag_images(open(img_fp))
            tags = result['results'][0]['result']['tag']['classes']
            if cur_tale:
                next_tales = tale_dict[cur_tale]['next']
            else:
                next_tales = list()
            tale_name = pick_tale(tags, next_tales)
            cur_tale = tale_name
            if tale_name:
                lines_to_print = open_tale(tale_name)
                seen_tales.append(tale_name)
            else:
                grammar_dict["N"].extend(tags)
                if not inputLeft:
                    sentences = [gen.make_polar(grammar_dict, 10, sent=0) for _ in range(10)]
                elif not inputRight:
                    sentences = [gen.make_polar(grammar_dict, 10) for _ in range(10)]
                else:
                    sentences = gen.main(grammar_dict, 10)
                lines_to_print = to_lines(sentences)
            prefix = '\n\n\nNo. %i\n\n' % imgCount
            printer.println(prefix+lines_to_print+'\n\n\n')
            grammar_dict["N"] = list()
            imgCount += 1
        except:
            blink_left_right(15)
            print sys.exc_info()
    elif (not inputLeft) and (not inputRight):
        offCounter = 0
        for _ in range(100):
            inputLeft, inputRight = map(GPIO.input, butPins)
            if (not inputLeft) and (not inputRight):
                time.sleep(0.1)
                offCounter += 1
                if offCounter > 50:
                    os.system('sudo shutdown -h now')
            else:
                break
Click here for a Google Drive folder with all the passages from the novel.
This week, I’ve been exhibiting my ongoing project, word.camera, at IDFA DocLab in Amsterdam. My installation consists of four cameras:
During the exhibition, I was also invited to deliver two lectures. Here are my slides from the first lecture:
And here’s a video of the second one:
Visitors can reserve the portable cameras for half-hour blocks by leaving their ID at the volunteer kiosk. I have really enjoyed watching people borrow and use my cameras.
My robot will take the form of a benevolent computer virus. Using tools like pyautogui and the python webbrowser library, it will respond to user inquiries by opening documents, typing, and displaying web pages. It will also talk back to users using Apple’s text-to-speech utility.
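As a rough sketch of the kinds of actions I have in mind (the intents, phrases, and sites below are placeholders, not the finished bot):

# Hypothetical sketch of the robot's "benevolent virus" actions; the intent
# names, URLs, and spoken phrases are placeholders for illustration.
import subprocess
import webbrowser

import pyautogui


def speak(text):
    # Apple's built-in text-to-speech utility
    subprocess.call(['say', text])


def respond_to(intent):
    if intent == 'show_weather':
        webbrowser.open('https://forecast.weather.gov')   # display a web page
        speak("Here is the weather.")
    elif intent == 'take_note':
        pyautogui.hotkey('command', 'space')               # open Spotlight
        pyautogui.typewrite('TextEdit\n', interval=0.05)   # launch an editor
        pyautogui.typewrite('Taking your note now...', interval=0.03)
        speak("I'm ready to take your note.")


respond_to('show_weather')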
I am building this robot using Wit.ai, a deep learning tool for making voice interfaces. Using the tool’s dashboard, I have been training my robot to respond to various user intents.
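Once an intent is trained, the bot can send each transcribed utterance to Wit.ai's message endpoint and read back the detected intent. Here is a hedged sketch of that request; the token and version date are placeholders, and the exact response schema depends on the API version, so the Wit.ai docs are the authority here:

# Rough sketch of querying Wit.ai over HTTP (token, version, and response
# handling are assumptions, not verified against the current API docs).
import requests

WIT_TOKEN = 'YOUR_SERVER_ACCESS_TOKEN'   # placeholder

def interpret(utterance):
    response = requests.get(
        'https://api.wit.ai/message',
        params={'q': utterance, 'v': '20151101'},
        headers={'Authorization': 'Bearer ' + WIT_TOKEN},
    )
    return response.json()   # contains whatever intent(s) Wit.ai detected

print(interpret("I feel anxious about my thesis"))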
The core of the robot will be a therapy bot similar to ELIZA, but with some added capabilities. When this project is complete, I believe it will provide an interesting take on artificial intelligence. By using AI tools for purposes other than those they were designed for, I hope to make users question whether the tool in front of them is in fact sentient and aware of their presence.
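For reference, the ELIZA-style core boils down to pattern matching and reflection, something like this toy exchange (these patterns are illustrative, not the bot's actual rules):

# Toy ELIZA-style responder: match an utterance against regex rules and
# reflect part of it back in a canned response.
import random
import re

RULES = [
    (r'i feel (.*)', ["Why do you feel {0}?", "How long have you felt {0}?"]),
    (r'i am (.*)', ["What makes you say you are {0}?"]),
    (r'.*\b(mother|father)\b.*', ["Tell me more about your {0}."]),
]

def reply(utterance):
    text = utterance.lower().strip('.!?')
    for pattern, responses in RULES:
        match = re.match(pattern, text)
        if match:
            return random.choice(responses).format(*match.groups())
    return "Please, go on."

print(reply("I feel invisible lately"))   # e.g. "Why do you feel invisible lately?"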
I started by adding a shutter button to the top of the enclosure. I used a Cherry MX Blue mechanical keyboard switch that I had leftover from a project last year.
The battery and Raspberry Pi just barely fit into the enclosure:
The Raspberry Pi camera module is wedged snugly beneath the camera’s front plate:
In addition to playing the song, I added some functionality that provides a bit of context to the user. Using the pico2wave text-to-speech utility, the camera speaks the tags aloud before playing the song. Additionally, using SoX, the camera plays an initialization tone generated from the color histogram of the image before reading the tags.
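The play_text.sh helper isn't reproduced in the listing below; it amounts to roughly the following sketch, and the initialization tone uses the same SoX invocation as the play_tone function in the code (paths and frequencies here are placeholders):

# Hypothetical stand-in for play_text.sh, plus the SoX tone, as Python
# subprocess calls; file path and example values are placeholders.
import subprocess

def speak(text):
    # pico2wave renders text to a WAV file, which aplay then plays
    subprocess.call(['pico2wave', '-w', '/tmp/speech.wav', text])
    subprocess.call(['aplay', '/tmp/speech.wav'])

def init_tone(low_freq, high_freq):
    # SoX sweeps a sawtooth wave between two frequencies derived from
    # the image's color histogram (same call as play_tone below)
    subprocess.call(['play', '-n', 'synth', '0.25', 'saw', '%i-%i' % (low_freq, high_freq)])

speak("night, dark, moon")
init_tone(220, 880)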
Here’s the code that’s currently running on the Raspberry Pi:
from __future__ import unicode_literals
import os
import json
import uuid
import time
from random import choice as rc
from random import sample as rs
import re
import subprocess
import RPi.GPIO as GPIO
import picamera
from clarifai.client import ClarifaiApi
import requests
from PIL import Image
import sys
import threading

import spotify
import genius_token

# SPOTIFY STUFF

# Assuming a spotify_appkey.key in the current dir
session = spotify.Session()

# Process events in the background
loop = spotify.EventLoop(session)
loop.start()

# Connect an audio sink
audio = spotify.AlsaSink(session)

# Events for coordination
logged_in = threading.Event()
logged_out = threading.Event()
end_of_track = threading.Event()

logged_out.set()


def on_connection_state_updated(session):
    if session.connection.state is spotify.ConnectionState.LOGGED_IN:
        logged_in.set()
        logged_out.clear()
    elif session.connection.state is spotify.ConnectionState.LOGGED_OUT:
        logged_in.clear()
        logged_out.set()


def on_end_of_track(self):
    end_of_track.set()

# Register event listeners
session.on(
    spotify.SessionEvent.CONNECTION_STATE_UPDATED,
    on_connection_state_updated)
session.on(spotify.SessionEvent.END_OF_TRACK, on_end_of_track)

# Assuming a previous login with remember_me=True and a proper logout
# session.relogin()
# session.login(genius_token.spotify_un, genius_token.spotify_pwd, remember_me=True)
# logged_in.wait()

# CAMERA STUFF

# Init Camera
camera = picamera.PiCamera()

# Init GPIO
GPIO.setmode(GPIO.BCM)

# Button Pin
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)

IMGPATH = '/home/pi/soundcamera/img/'

clarifai_api = ClarifaiApi()


def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in xrange(0, len(l), n):
        yield l[i:i+n]


def take_photo():
    fn = str(int(time.time()))+'.jpg'  # TODO: Change to timestamp hash
    fp = IMGPATH+fn
    camera.capture(fp)
    return fp


def get_tags(fp):
    fileObj = open(fp)
    result = clarifai_api.tag_images(fileObj)
    resultObj = result['results'][0]
    tags = resultObj['result']['tag']['classes']
    return tags


def genius_search(tags):
    access_token = genius_token.token
    payload = {
        'q': ' '.join(tags),
        'access_token': access_token
    }
    endpt = 'http://api.genius.com/search'
    response = requests.get(endpt, params=payload)
    results = response.json()
    hits = results['response']['hits']
    artists_titles = []
    for h in hits:
        hit_result = h['result']
        if hit_result['url'].endswith('lyrics'):
            artists_titles.append(
                (hit_result['primary_artist']['name'], hit_result['title'])
            )
    return artists_titles


def spotify_search(query):
    endpt = "https://api.spotify.com/v1/search"
    payload = {
        'q': query,
        'type': 'track'
    }
    response = requests.get(endpt, params=payload)
    result = response.json()
    result_zero = result['tracks']['items'][0]
    return result_zero['uri']


def main(fn):
    tags = get_tags(fn)
    for tag_chunk in chunks(tags, 3):
        artists_titles = genius_search(tag_chunk)
        for artist, title in artists_titles:
            try:
                result_uri = spotify_search(artist+' '+title)
            except IndexError:
                pass
            else:
                print tag_chunk
                byline = "%s by %s" % (title, artist)
                print byline
                to_read = ', '.join(tag_chunk) + ". " + byline
                return to_read, result_uri


def play_uri(track_uri):
    # Play a track
    # audio = spotify.AlsaSink(session)
    session.login(genius_token.spotify_un, genius_token.spotify_pwd, remember_me=True)
    logged_in.wait()
    track = session.get_track(track_uri).load()
    session.player.load(track)
    session.player.play()


def stop_track():
    session.player.play(False)
    session.player.unload()
    session.logout()
    logged_out.wait()
    audio._close()


def talk(msg):
    proc = subprocess.Popen(
        ['bash', '/home/pi/soundcamera/play_text.sh', msg]
    )
    proc.communicate()


def play_tone(freqs):
    freq1, freq2 = freqs
    proc = subprocess.Popen(
        ['play', '-n', 'synth', '0.25', 'saw', "%i-%i" % (freq1, freq2)]
    )
    proc.communicate()


def histo_tone(fp):
    im = Image.open(fp)
    hist = im.histogram()
    vals = map(sum, chunks(hist, 64))  # list of 12 values
    print vals
    map(play_tone, chunks(vals, 2))


if __name__ == "__main__":
    input_state = True
    new_state = True
    hold_counter = 0

    while 1:
        input_state = GPIO.input(18)
        if not (input_state and new_state):
            talk("capturing")
            # Hold for 15 seconds to turn off
            while not GPIO.input(18):
                time.sleep(0.1)
                hold_counter += 1
                if hold_counter > 150:
                    os.system('shutdown now -h')
                    sys.exit()
            # Reset hold counter
            hold_counter = 0
            # Else take photo
            try:
                img_fp = take_photo()
                msg, uri = main(img_fp)
                histo_tone(img_fp)
                talk(msg)
                play_uri(uri)
            except:
                print sys.exc_info()
            # Wait for playback to complete or Ctrl+C
            try:
                while not end_of_track.wait(0.1):
                    # If new photo, play new song
                    new_state = GPIO.input(18)
                    if not new_state:
                        stop_track()
                        # time.sleep(2)
                        break
            except KeyboardInterrupt:
                pass
The front-end JavaScript code is available on GitHub. Here is the primary back-end Python code:
import os
import json
import uuid
from base64 import decodestring
import time
from random import choice as rc
from random import sample as rs
import re

import PIL
from PIL import Image
import requests
import exifread
from flask import Flask, request, abort, jsonify
from flask.ext.cors import CORS
from werkzeug import secure_filename
from clarifai.client import ClarifaiApi

app = Flask(__name__)
CORS(app)

app.config['UPLOAD_FOLDER'] = '/var/www/SoundCamera/SoundCamera/static/img'
IMGPATH = '/var/www/SoundCamera/SoundCamera/static/img/'

clarifai_api = ClarifaiApi()


@app.route("/")
def index():
    return "These aren't the droids you're looking for."


@app.route("/img", methods=["POST"])
def img():
    request.get_data()
    if request.method == "POST":
        f = request.files['file']
        if f:
            filename = secure_filename(f.filename)
            f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            new_filename = resize_image(filename)
            return jsonify(uri=main(new_filename))
        else:
            abort(501)


@app.route("/b64", methods=["POST"])
def base64():
    if request.method == "POST":
        fstring = request.form['base64str']
        filename = str(uuid.uuid4())+'.jpg'
        file_obj = open(IMGPATH+filename, 'w')
        file_obj.write(fstring.decode('base64'))
        file_obj.close()
        return jsonify(uri=main(filename))


@app.route("/url")
def url():
    img_url = request.args.get('url')
    response = requests.get(img_url, stream=True)
    orig_filename = img_url.split('/')[-1]
    if response.status_code == 200:
        with open(IMGPATH+orig_filename, 'wb') as f:
            for chunk in response.iter_content(1024):
                f.write(chunk)
        new_filename = resize_image(orig_filename)
        return jsonify(uri=main(new_filename))
    else:
        abort(500)


# def allowed_img_file(filename):
#     return '.' in filename and \
#         filename.rsplit('.', 1)[1].lower() in set(['.jpg', '.jpeg', '.png'])


def resize_image(fn):
    longedge = 640
    orientDict = {
        1: (0, 1),
        2: (0, PIL.Image.FLIP_LEFT_RIGHT),
        3: (-180, 1),
        4: (0, PIL.Image.FLIP_TOP_BOTTOM),
        5: (-90, PIL.Image.FLIP_LEFT_RIGHT),
        6: (-90, 1),
        7: (90, PIL.Image.FLIP_LEFT_RIGHT),
        8: (90, 1)
    }

    imgOriList = []
    try:
        f = open(IMGPATH+fn, "rb")
        exifTags = exifread.process_file(f, details=False, stop_tag='Image Orientation')
        if 'Image Orientation' in exifTags:
            imgOriList.extend(exifTags['Image Orientation'].values)
    except:
        pass

    img = Image.open(IMGPATH+fn)
    w, h = img.size
    newName = str(uuid.uuid4())+'.jpeg'

    if w >= h:
        wpercent = (longedge/float(w))
        hsize = int((float(h)*float(wpercent)))
        img = img.resize((longedge, hsize), PIL.Image.ANTIALIAS)
    else:
        hpercent = (longedge/float(h))
        wsize = int((float(w)*float(hpercent)))
        img = img.resize((wsize, longedge), PIL.Image.ANTIALIAS)

    for val in imgOriList:
        if val in orientDict:
            deg, flip = orientDict[val]
            img = img.rotate(deg)
            if flip != 1:
                img = img.transpose(flip)

    img.save(IMGPATH+newName, format='JPEG')
    os.remove(IMGPATH+fn)
    return newName


def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in xrange(0, len(l), n):
        yield l[i:i+n]


def get_tags(fp):
    fileObj = open(fp)
    result = clarifai_api.tag_images(fileObj)
    resultObj = result['results'][0]
    tags = resultObj['result']['tag']['classes']
    return tags


def genius_search(tags):
    access_token = 'd2IuV9fGKzYEWVnzmLVtFnm-EYvBQKR8Uh3I1cfZOdr8j-BGVTPThDES532dym5a'
    payload = {
        'q': ' '.join(tags),
        'access_token': access_token
    }
    endpt = 'http://api.genius.com/search'
    response = requests.get(endpt, params=payload)
    results = response.json()
    hits = results['response']['hits']
    artists_titles = []
    for h in hits:
        hit_result = h['result']
        if hit_result['url'].endswith('lyrics'):
            artists_titles.append(
                (hit_result['primary_artist']['name'], hit_result['title'])
            )
    return artists_titles


def spotify_search(query):
    endpt = "https://api.spotify.com/v1/search"
    payload = {
        'q': query,
        'type': 'track'
    }
    response = requests.get(endpt, params=payload)
    result = response.json()
    result_zero = result['tracks']['items'][0]
    return result_zero['uri']


def main(fn):
    tags = get_tags(IMGPATH+fn)
    for tag_chunk in chunks(tags, 3):
        artists_titles = genius_search(tag_chunk)
        for artist, title in artists_titles:
            try:
                result_uri = spotify_search(artist+' '+title)
            except IndexError:
                pass
            else:
                return result_uri


if __name__ == "__main__":
    app.run()
It uses the same algorithm discussed in my prior post. Now that I have the opportunity to test it more, I am not quite satisfied with the results it is providing. First of all, they are not entirely deterministic (you can upload the same photo twice and end up with two different songs in some cases). Moreover, the results from a human face — which I expect to be a common use case — are not very personal. For the next steps in this project, I plan to integrate additional data including GPS, weather, time of day, and possibly even facial expressions in order to improve the output.
The broken cameras I ordered from eBay have arrived, and I have been considering how to use them as cases for the new models. I also purchased a GPS module for my Raspberry Pi, so the next Sound Camera prototype, with new features integrated, will likely be a physical version. I’m planning to use this Kodak Brownie camera (c. 1916):
This was the primary inspiration for my latest project, which features Kurt Vonnegut’s complete works, analyzed for sentiment, and visualized as interactive word clouds. I developed it entirely in front-end JavaScript, and it’s currently hosted on GitHub Pages: rossgoodwin.com/vonnegut
Users can scrub through the sentiment graph of each book from start to finish and see a word cloud displayed for each position on the slider. Each word cloud represents 10 paragraphs of the book. Along with the rises and dips in the graph, sentiment values are indicated by the color of the word cloud text, which ranges from dark green (highly positive) to bright red (highly negative).
Rather than simply using word count or frequency for the size of the words, I used TF-IDF scores. (Each 10-paragraph block was treated as one document, and each book was treated as an independent set of documents.) As a result, the largest words in each word cloud are those that make their respective section unique in the context of the entire book.
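As a minimal sketch of that scheme, assuming scikit-learn (the real computation lives in the notebooks linked below), where each 10-paragraph block is a document and the book is its own corpus:

# Sketch of per-book TF-IDF; `paragraphs` is a hypothetical list of one
# book's paragraphs in reading order.
from sklearn.feature_extraction.text import TfidfVectorizer

def book_tfidf(paragraphs, block_size=10, top_n=50):
    # Join every 10 paragraphs into one "document"
    blocks = ['\n'.join(paragraphs[i:i+block_size])
              for i in range(0, len(paragraphs), block_size)]
    vectorizer = TfidfVectorizer(stop_words='english')
    matrix = vectorizer.fit_transform(blocks)   # rows: blocks, cols: vocabulary
    vocab = vectorizer.get_feature_names()
    # For each block, keep the top_n highest-scoring words; these become
    # the largest words in that block's cloud.
    results = []
    for row in matrix:
        scores = row.toarray()[0]
        ranked = sorted(zip(vocab, scores), key=lambda x: x[1], reverse=True)
        results.append([(word, score) for word, score in ranked[:top_n] if score > 0])
    return results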
The first steps in creating this project were to parse Vonnegut’s books, perform TF-IDF calculations for each word and sentiment analysis for each 10-paragraph segment, then store the resulting data in a set of JSON files. Here are the iPython Notebooks where I completed these steps:
Once I had the JSON files, I used D3 to create the word clouds and Chart.js to create the line graphs. The sliders are HTML range inputs, modified with custom CSS. I wanted to create the appearance of long, semi-transparent planchettes sliding over the graphs. Getting the sliders to line up with the graphs precisely was particularly challenging, as was providing the option to click on the graphs in any location and automatically move the sliders to that location.
Here is my JavaScript code, in its current state:
(function() {

  Number.prototype.map = function (in_min, in_max, out_min, out_max) {
    return (this - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;
  }

  function titleCase(str) {
    return str.replace(/\w\S*/g, function(txt){return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();});
  }

  // Charts.js global config
  Chart.defaults.global.animation = false;
  Chart.defaults.global.tooltipEvents = [];
  Chart.defaults.global.scaleFontFamily = "'Cousine', monospace";
  Chart.defaults.global.showScale = false;

  // var spectrum = ['#F22613', '#E74C3C', '#D35400', '#F2784B', '#95A5A6', '#68C3A3', '#4DAF7C', '#3FC380', '#2ECC71'];
  var spectrum = ["#f22613", "#f25749", "#f28379", "#f2b0aa", "#95a5a6", "#add9c2", "#74b391", "#45996c", "#1e824c"];

  $("#key-block").append(
    '<div id="key-text-box"><p class="text-center lead small" style="margin-left: 7px;"><<< negative | positive >>></p></div>'
  );

  spectrum.map(function(hex){
    $("#key-block").append(
      '<div class="key-color" style="background-color:'+hex+';"></div>'
    );
  });

  function updateCloud(bookslug, section) {
    $.getJSON("data/vonnegut-"+section+".json", function(data){

      // var factor = Math.pow(data[bookslug]['tfidf'].length, 2);

      var layout = d3.layout.cloud()
          .size([800, 500])
          .words(data[bookslug]['tfidf'].map(function(d) {
            return {text: d[0], size: d[1] * 500};
          }))
          .padding(3)
          .rotate(function() { return 0; }) // return ~~(Math.random() * 2) * 90
          .font("Cousine")
          .fontSize(function(d) { return d.size; })
          .on("end", draw);

      layout.start();

      function draw(words) {
        var overallContainer = d3.select("#"+bookslug);

        overallContainer.select("svg").remove();
        overallContainer.select("a").remove();

        var svgContainer = overallContainer.append("svg")
            .attr("width", layout.size()[0])
            .attr("height", layout.size()[1])
            .attr("class", "svg-cont");

        var wordCloud = svgContainer.append("g")
            .attr("transform", "translate(" + layout.size()[0] / 2 + "," + layout.size()[1] / 2 + ")")
          .selectAll("text")
            .data(words)
          .enter().append("text")
            .transition().duration(500)
            .style("font-size", function(d) { return d.size + "px"; })
            .style("font-family", "Cousine")
            .style("fill", function(d, i) {
              var sentiment = data[bookslug]['sentiment'];
              var ix = Math.floor(((sentiment + 1)/2)*spectrum.length);
              return spectrum[ix];
            })
            .attr("text-anchor", "middle")
            .attr("transform", function(d) {
              return "translate(" + [d.x, d.y] + ")rotate(" + d.rotate + ")";
            })
            .text(function(d) { return d.text; });

        var title = titleCase(data[bookslug]['title']);

        var labelText = overallContainer
          .append("a")
          .attr("href", "http://www.amazon.com/exec/obidos/external-search/?field-keywords=%s"+title+"&mode=blended")
          .attr("class", "twitter-link")
          .attr("target", "_blank")
          .text(title);

        overallContainer.transition()
          .style("opacity", 1.0)
          .delay(1000)
          .duration(3000);
      }

    });
  }

  $.getJSON("data/sentiment.json", function(sent){
    $.getJSON("data/vonnegut-0.json", function(data){

      $("#loadinggif").fadeOut("slow");

      Object.keys(data).sort().map(function(slug){

        $("#vis").append(
          '<div id="'+slug+'" class="col-md-12 transparent text-center"></div>'
        );

        $("#"+slug).append(
          '<canvas class="chart-canvas" id="'+slug+'-chart" width="800" height="150"></canvas>'
        );

        var ctx = document.getElementById(slug+"-chart").getContext("2d");

        var xLabels = [];
        for (var i=0;i<data[slug]['length'];i++) {
          xLabels.push('');
        }

        var chartData = {
          labels: xLabels,
          datasets: [
            {
              label: titleCase(data[slug]['title']),
              fillColor: "rgba(210, 215, 211, 0.7)",
              strokeColor: "rgba(189, 195, 199, 1)",
              pointColor: "rgba(210, 215, 211, 1)",
              pointStrokeColor: "#fff",
              pointHighlightFill: "#fff",
              pointHighlightStroke: "rgba(220,220,220,1)",
              data: sent[slug]
            }
          ]
        };

        var chartOptions = {
          pointDot : false,
          pointHitDetectionRadius : 5,
          scaleShowVerticalLines: false,
          bezierCurve: false
        };

        var myNewChart = new Chart(ctx).Line(chartData, chartOptions);

        var stepCount = data[slug]['length'] - 1;

        $("#"+slug).append(
          '<div class="scrubber"><input id="'+slug+'-scrub" type="range" min="0" max="'+stepCount+'" value="0" step="1"></div>'
        );

        $("#"+slug+"-chart").on("click", function(evt){
          var activePoints = myNewChart.getPointsAtEvent(evt);
          var xPos = activePoints[Math.floor(activePoints.length/2)].x;
          var ix = Math.floor(xPos.map(0, 800, 0, data[slug]['length']));
          console.log(xPos);
          console.log(ix);
          $('#'+slug+'-scrub').val(ix);
          updateCloud(slug, ix);
        });

        // Play Button
        $('#'+slug).append(
          '<button type="button" id="'+slug+'-btn" class="btn btn-default btn-xs play-btn" aria-label="Play"><span class="glyphicon glyphicon-play" aria-hidden="true"></span></button>'
        );

        $('#'+slug).append(
          '<button type="button" id="'+slug+'-btn-pause" class="btn btn-default btn-xs play-btn" aria-label="Pause"><span class="glyphicon glyphicon-pause" aria-hidden="true"></span></button>'
        );

        // Load First Clouds
        updateCloud(slug, 0);

        var play;

        $('#'+slug+'-btn').click(function(){
          console.log('clicked ' + slug);

          autoAdvance();
          play = setInterval(function(){
            autoAdvance();
          }, 5000);

          function autoAdvance(){
            var scrubVal = $('#'+slug+'-scrub').val();
            console.log(data[slug]['length']);
            if (scrubVal >= data[slug]['length']-1) {
              console.log("EOR");
              clearInterval(play);
            }
            console.log(scrubVal);
            var newVal = parseInt(scrubVal, 10) + 1;
            $('#'+slug+'-scrub').val(newVal);
            updateCloud(slug, newVal);
          }
        });

        $('#'+slug+'-btn-pause').click(function(){
          clearInterval(play);
        });

        $("#"+slug+"-scrub").on("input", function(){
          var sectNo = $(this).val();
          console.log(sectNo);
          updateCloud(slug, sectNo);
        });

      });
    });
  });

})();
The rest of my front-end code can be found on GitHub.
My code:
iPython Notebook
The script I wrote gets concept words from the image via Clarifai, then searches song lyrics for those words on Genius, then finds the song on Spotify. Below are some images I put through the algorithm. You can click on each one to hear the song that resulted, though you will need to log in to Spotify to do so.
The next step will be to get this code working on a Raspberry Pi inside one of the film camera bodies I just received via eBay.
We began by making graphs of the concepts related to our topics. Here’s mine:
I then looked at the established definition and etymology of the word [source]:
The Wikipedia article provided a bit more detail, and a more satisfying definition of the word:
A doodle is a drawing made while a person’s attention is otherwise occupied.
Much of the information on Wikipedia came from a 2009 NPR story on doodles, which made reference to a scholarly article and related study by Professor Jackie Andrade at the University of Plymouth.
Before venturing any further outward with my research, I should take a step inward and relate honestly w/r/t my own status: I am a prolific doodler. I have been doodling in my notes for as long as I’ve been taking notes, which likely started sometime in elementary school. Unfortunately, my earliest doodles are lost to time, doubtlessly recycled several times over and presently inhabiting the toilet tissue of some faraway place. However, many of my doodles from college remain archived securely in a storage bin in my parents’ garage in California. I may need to send for them.
The oldest doodles of mine that I can readily access are some of those I made in high school / early in college. I can access them because I doodled in Microsoft Paint at the time, and I’ve saved the contents of all my prior computers’ hard drives in an ever-expanding nested structure of folders on Google Drive. I drew these with a computer mouse:
The above examples could only questionably be called doodles, as they were drawn with a fair amount of attention. Here are some more recent (and more genuine) doodles, as photographed in one of my recent notebooks:
N.B. It’s candle wax. Don’t ask.
As you can see, tesseracts are my go-to doodle design. I learned how to draw one a few years ago. It was during a difficult time in my life—I was in the hospital, and another patient showed me his technique. I’ve been doodling almost nothing but tesseracts ever since.
I considered why, exactly, I almost always choose to doodle when taking notes, even during the most fascinating lectures, and the best answer I can surmise is that it’s an activity that helps me process information. When I was young, I had difficulty maintaining my attention while listening to my teachers. I felt as though my mind was too restless to take in information in the slow, auditory tradition of the classroom. When reading, I could move as quickly as I desired, but when listening to my teachers I was confined to the pace they set for all their students. By doodling, I could keep my mind occupied enough to listen.
Fortunately, listening in class is no longer a serious challenge for me, but I cling to my doodling like a child with a security blanket. And I don’t think it’s a coincidence that I typically choose to doodle a four-dimensional mathematical diagram. When I’m listening to a lecture, part of my brain is listening and part of it is processing the information, connecting it to other topics and areas of interest. The extra-dimensionality of the tesseract somehow echoes the act of listening for me: a marriage of the three-dimensional lecture proceeding in front of me, and the extra dimension of synthesis happening inside my skull.
After this bit of reflection, I don’t find the conclusion of Professor Andrade’s aforementioned study very surprising:
Unlike many dual task situations, doodling while working can be beneficial.
The professor had two groups of people listen to a monotonous telephone message listing the names of people coming to a party. She asked one group to shade printed shapes, and the other group to simply listen. When presented with a surprise quiz, the doodling group recalled 29% more information than the control group.
I’m still not sure exactly which aspect of doodling I want to explore for my project. To me, the most interesting aspects of the activity are linked to:
My first experiment will almost certainly involve collecting and compiling more doodles from wherever I can find them. Stay tuned for the results of that search.
I have not yet chosen the authors I plan to embed in these cameras, or how I plan to present the extracted text. I also have tentative plans for a new iteration of the talking surveillance camera I developed last semester, but more on that in future posts.
This week, I spent some time on eBay finding a few broken medium- and large-format cameras to use as cases. Here’s what I bought (for $5 to $25 each):
I am currently waiting to receive them so that I can start planning the builds. Below is a list of the additional parts that will be required for each camera:
Raspberry Pi 2 ($40)
85.60mm x 56mm x 21mm (or roughly 3.37″ x 2.21″ x 0.83″)
Raspberry Pi Camera Board ($30)
25mm x 20mm x 9mm
Buck Converter ($10)
51mm x 26.3mm x 14mm (L x W x H)
7.4V LiIon Battery Pack ($90)
22mm (0.9″) x 104mm (4.1″) x 107mm (4.2″)
OR two USB batteries ($40)
Thermal Printer ($25 from China or $50 from U.S.)
~4 1/8″ (105mm) x 2 1/4″ (58mm) for rectangular hole
~58mm deep
On/Off Switch ($1)
18.60mm x 12.40mm rectangular hole
13.9mm deep
LED Button ($5)
Shutter button, user will hold for 3 seconds to turn off Raspberry Pi
16mm round hole
~1.5″ deep
1/4-size permaproto board ($3)
1/4″ Acrylic ($12) or Broken Medium Format TLR ($30-69)
Jumper Wires ($2)
Last week, Joanna Wrzaszczyk and I completed the first version of our dynamic light sculpture, inspired by Italo Calvino’s Invisible Cities and the Traveling Salesman Problem. We have decided to call it the Traveler’s Lamp.
Here is the midterm presentation that Joanna and I delivered in March:
We received a lot of feedback after that presentation, which resulted in a number of revisions to the lamp’s overall design. Here are some sketches I made during that process:
Since that presentation, Joanna and I successfully designed and printed ten city-nodes for the lamp. Here is the deck from our final presentation, which contains renderings of all the city-nodes:
We built the structure from laser-cut acrylic, fishing line, and 38-gauge wire. The top and base plates of the acrylic scaffolding are laser etched with the first and last page, respectively, from Invisible Cities. We fabricated the wood base on ITP’s CNC router from 3/4″ plywood.
Here are some photos of the assembled lamp:
Here’s a sketch, by Joanna, of the x-y-z coordinate plot that we fed into the computer program:
And finally, here’s some of the Python code that’s running on the Raspberry Pi:
def tsp():
    # Greedy nearest-neighbor walk through the city-nodes: start at a random
    # node, then repeatedly light the closest unvisited node.
    # pins, locDict, and distance() are defined elsewhere in the full script.
    startingPin = random.choice(pins)
    pins.remove(startingPin)
    GPIO.output(startingPin, True)
    sleep(0.5)
    for i in range(len(pins)):  # originally range(pins); len() makes the loop valid
        distances = []
        for p in pins:
            dist = distance(locDict[startingPin], locDict[p])
            distances.append((dist, p))
            GPIO.output(p, True)
            sleep(0.5)
            GPIO.output(p, False)
        distances = sorted(distances, key=lambda x: x[0])
        nextPin = distances[0][1]
        GPIO.output(nextPin, True)
        sleep(0.5)
        pins.remove(nextPin)
        startingPin = nextPin