What's the difference between this script:
import random
import argparse
import asyncio
from typing import Tuple
import csv
import os

import nltk
from transformers import pipeline, Pipeline
from pythonosc import dispatcher, osc_server, udp_client
from pythonosc.udp_client import SimpleUDPClient

# Tell NLTK where its data (tokenizers, POS tagger models) lives on this machine.
nltk.data.path.append('C:/Users/Florence Nightingale/AppData/Roaming/nltk_data')

# Module-level message counter forwarded with every classified sentence.
new_message = 0
# CSV log of every sentence received, with its classification and word response.
file_path = r'C:\Users\Florence Nightingale\Documents\Florence Nightingale Living Portrait\InputHistory.csv'
class Config:
    """
    Configuration class for default values and settings.

    Attributes:
        LOCAL_IP (str): IP address for the local machine.
        TABLET_IP (str): IP address for the tablet.
        LOCAL_PORT (int): Port for the local machine to listen on.
        TD_PORT (int): Port for the TD (transmitting device).
        TABLET_PORT (int): Port for the tablet.
        OFFENSIVE_MODEL (str): Model name for offensive content classification.
        CONTEXT_MODEL (str): Model name for context classification.
        ZERO_SHOT_LABELS (list): List of labels for zero-shot classification.
    """
    # Values reconstructed from the second copy of this script — confirm on deploy.
    LOCAL_IP = "192.168.0.2"
    TABLET_IP = "192.168.0.3"
    LOCAL_PORT = 8000
    TD_PORT = 9000
    TABLET_PORT = 10000
    OFFENSIVE_MODEL = "unitary/toxic-bert"
    CONTEXT_MODEL = "facebook/bart-large-mnli"
    ZERO_SHOT_LABELS = ["angel", "hero", "feminist", "rebel"]
class FlorenceAttributes:
    """A class to store and manipulate sentence classification attributes."""

    LABELS = Config.ZERO_SHOT_LABELS

    def __init__(self,
                 primary: str = None,
                 secondary: str = None,
                 tertiary: str = None,
                 quaternary: str = None,
                 primary_value: int = None,
                 secondary_value: int = None,
                 tertiary_value: int = None,
                 quaternary_value: int = None,
                 ambiguous: bool = False):
        """
        Initialize the FlorenceAttributes class.

        Args:
            primary (str): Primary label.
            secondary (str): Secondary label.
            tertiary (str): Tertiary label.
            quaternary (str): Quaternary label.
            primary_value (int): Value of the primary label.
            secondary_value (int): Value of the secondary label.
            tertiary_value (int): Value of the tertiary label.
            quaternary_value (int): Value of the quaternary label.
            ambiguous (bool): Whether the attributes are ambiguous.
        """
        self.primary = primary
        self.secondary = secondary
        self.tertiary = tertiary
        self.quaternary = quaternary
        self.primary_value = primary_value
        self.secondary_value = secondary_value
        self.tertiary_value = tertiary_value
        self.quaternary_value = quaternary_value
        self.ambiguous = ambiguous

    def __repr__(self) -> str:
        """Return a string representation of the attributes."""
        return (f'PRIMARY: "{self.primary}" = {self.primary_value}, '
                f'SECONDARY: "{self.secondary}" = {self.secondary_value}, '
                f'TERTIARY: "{self.tertiary}" = {self.tertiary_value}, '
                f'QUATERNARY: "{self.quaternary}" = {self.quaternary_value}')

    def has_null_labels(self) -> bool:
        """Check if any labels are null or 'None'."""
        return any(label in ("", "None") for label in [self.primary, self.secondary, self.tertiary, self.quaternary])

    def has_null_values(self) -> bool:
        """Check if any values are null or zero."""
        return any(value in (0, None) for value in
                   [self.primary_value, self.secondary_value, self.tertiary_value, self.quaternary_value])

    def fill_missing_labels(self):
        """Fill empty label slots with distinct unused labels from the predefined list."""
        used_labels = {label for label in [self.primary, self.secondary, self.tertiary, self.quaternary] if label}
        free_labels = [label for label in self.LABELS if label not in used_labels]
        # Shuffle once and pop so each slot gets a distinct label (random.choice
        # without removal could previously assign the same label twice).
        random.shuffle(free_labels)
        if not self.primary and free_labels:
            self.primary = free_labels.pop()
        if not self.secondary and free_labels:
            self.secondary = free_labels.pop()
        if not self.tertiary and free_labels:
            self.tertiary = free_labels.pop()
        if not self.quaternary and free_labels:
            self.quaternary = free_labels.pop()

    def reorganize_data(self) -> str:
        """Reorganize the data for display, including the message counter."""
        labels_values = zip([self.primary, self.secondary, self.tertiary, self.quaternary],
                            [self.primary_value, self.secondary_value, self.tertiary_value, self.quaternary_value])
        result = [f"{label}: {value}" for label, value in labels_values if label in self.LABELS and value is not None]
        result.append(f"/new_message {new_message}")
        # A return was missing; the exact original display format is unrecoverable
        # from the paste — TODO confirm the expected string layout.
        return "\n".join(result)

    def generate_final_message(self, word_response: str) -> str:
        """Generate the final message including the word response and the message counter."""
        return (f"{self.primary} = {self.primary_value}, "
                f"{self.secondary} = {self.secondary_value}, "
                f"{self.tertiary} = {self.tertiary_value}, "
                f"{self.quaternary} = {self.quaternary_value}, "
                f"newMessage = {new_message}, "
                f"wordResponse = {word_response}")
def parse_args():
    """
    Parse command-line arguments.

    Returns:
        argparse.Namespace: Parsed arguments with default values.
    """
    parser = argparse.ArgumentParser(description="OSC Server for handling Unity messages")
    parser.add_argument("--local_ip", default=Config.LOCAL_IP, help="The IP of the python script")
    # type=int so a port passed on the command line is not left as a string.
    parser.add_argument("--local_port", type=int, default=Config.LOCAL_PORT, help="The port of the python script")
    # NOTE(review): td_ip defaults to LOCAL_IP — presumably Touch Designer runs on
    # the same machine; confirm.
    parser.add_argument("--td_ip", default=Config.LOCAL_IP, help="The IP of the Touch Designer app")
    parser.add_argument("--td_port", type=int, default=Config.TD_PORT, help="The port of the Touch Designer app")
    parser.add_argument("--unity_ip", default=Config.TABLET_IP, help="The IP of the Unity client")
    parser.add_argument("--unity_port", type=int, default=Config.TABLET_PORT, help="The port of the Unity client")
    return parser.parse_args()
def is_offensive(word: str, offensive_classifier: Pipeline) -> bool:
    """
    Check if a word is offensive using a text classification model.

    Args:
        word (str): The word to check.
        offensive_classifier (Pipeline): The classifier for detecting offensive content.

    Returns:
        bool: True if the word is classified as offensive, False otherwise.
    """
    result = offensive_classifier(word)
    # The classifier returns a list of {label, score}; treat a 'toxic' score > 0.5 as offensive.
    offensive = any(res['label'] == 'toxic' and res['score'] > 0.5 for res in result)
    print(f"Checking if word '{word}' is offensive: {result}")
    return offensive  # was missing: function previously fell through and returned None
def is_contextually_relevant(word: str, context_classifier: Pipeline) -> bool:
    """
    Check if a word is contextually relevant using a zero-shot classification model.

    Args:
        word (str): The word to check.
        context_classifier (Pipeline): The classifier for context detection.

    Returns:
        bool: True if the word is contextually relevant, False otherwise.
    """
    result = context_classifier(word, candidate_labels=Config.ZERO_SHOT_LABELS)
    relevant = any(score > 0.3 for score in result['scores'])  # Adjusted threshold for relevance
    print(f"Checking if word '{word}' is contextually relevant: {result}")
    return relevant  # was missing: function previously fell through and returned None
def extract_word_response(sentence: str, offensive_classifier: Pipeline, context_classifier: Pipeline) -> str:
    """
    Extract a contextually relevant and non-offensive word from a sentence.

    Args:
        sentence (str): The input sentence.
        offensive_classifier (Pipeline): The classifier for detecting offensive content.
        context_classifier (Pipeline): The classifier for context detection.

    Returns:
        str: A relevant word if found, otherwise a blank string.
    """
    words = nltk.word_tokenize(sentence)
    tagged_words = nltk.pos_tag(words)
    print(f"Tagged words: {tagged_words}")
    # Keep adjectives and (proper) nouns that the toxicity model does not flag.
    relevant_words = [word for word, pos in tagged_words if
                      pos in ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS')
                      and not is_offensive(word, offensive_classifier)]
    print(f"Relevant Adjectives, Proper Nouns, and Nouns: {relevant_words}")
    for word in relevant_words:
        if is_contextually_relevant(word, context_classifier):
            print(f"Chosen word: {word}")
            return word  # was missing: the chosen word was printed but never returned
    return " "  # Return blank if no relevant word is found or if it's offensive
def analyze_sentence(sentence: str) -> dict:
    """
    Analyze a sentence for contextual relevance using a zero-shot classification model.

    Args:
        sentence (str): The input sentence.

    Returns:
        dict: A dictionary with labels as keys and their corresponding scores as values.
    """
    # NOTE(review): building the pipeline on every call is expensive; consider
    # reusing the context_classifier created in main().
    classifier = pipeline("zero-shot-classification", model=Config.CONTEXT_MODEL)
    result = classifier(sentence, candidate_labels=Config.ZERO_SHOT_LABELS)
    # Scale the 0..1 float scores to integer percentages.
    weightings = {label: int(score * 100) for label, score in zip(result['labels'], result['scores'])}
    return weightings  # was missing: function previously returned None
async def classify_sentence(sentence: str, offensive_classifier: Pipeline,
                            context_classifier: Pipeline) -> Tuple[str, FlorenceAttributes, int, str]:
    """
    Classify a sentence, extract relevant words, and generate a final message.

    Args:
        sentence (str): The input sentence to classify.
        offensive_classifier (Pipeline): The classifier for detecting offensive content.
        context_classifier (Pipeline): The classifier for context detection.

    Returns:
        tuple: A tuple containing the final message, attributes object, message counter, and word response.
    """
    # PART 1: Analyze the sentence with zero-shot classification and rank the
    # labels by weight, highest first.
    weightings = analyze_sentence(sentence)
    sorted_weightings = sorted(weightings.items(), key=lambda item: item[1], reverse=True)
    primary, primary_value = sorted_weightings[0]
    secondary, secondary_value = sorted_weightings[1]
    tertiary, tertiary_value = sorted_weightings[2]
    quaternary, quaternary_value = sorted_weightings[3]
    attr1 = FlorenceAttributes(
        primary=primary,
        secondary=secondary,
        tertiary=tertiary,
        quaternary=quaternary,
        primary_value=primary_value,
        secondary_value=secondary_value,
        tertiary_value=tertiary_value,
        quaternary_value=quaternary_value,
    )
    # PART 2: Extract important words to be used as typographic elements
    word_response = extract_word_response(sentence, offensive_classifier, context_classifier)
    print(f"Weightings: {weightings}, Word response: {word_response}")  # Debug statement
    return str(attr1.generate_final_message(word_response)).strip(), attr1, new_message, word_response
async def handle_osc_message(unity_client: SimpleUDPClient,
                             td_client: SimpleUDPClient,
                             unused_addr,
                             offensive_classifier: Pipeline,
                             context_classifier: Pipeline,
                             sentence):
    """
    Handle incoming OSC messages and process them.

    Args:
        unity_client: The UDP client to send responses to Unity (tablet).
        td_client: The UDP client to send responses to Touch Designer.
        unused_addr: The address of the sender.
        offensive_classifier: The classifier for detecting offensive content.
        context_classifier: The classifier for context detection.
        sentence: The sentence payload from the OSC message.
    """
    print("I received message from Unity")
    response, attribute, new_msg, word_response = await classify_sentence(
        sentence, offensive_classifier, context_classifier)
    print(f"Received response from classify_sentence: {response}, {attribute}, {new_msg}, {word_response}")
    # Persist every interaction for later review.
    save_string_to_csv(file_path, f"{sentence} , {attribute}, {word_response}")
    unity_client.send_message("/response", response)
    # Fan the four label weights out to Touch Designer as individual OSC addresses.
    td_client.send_message(f"/{attribute.primary}", attribute.primary_value)
    td_client.send_message(f"/{attribute.secondary}", attribute.secondary_value)
    td_client.send_message(f"/{attribute.tertiary}", attribute.tertiary_value)
    td_client.send_message(f"/{attribute.quaternary}", attribute.quaternary_value)
    td_client.send_message("/newMessage", new_msg)
    td_client.send_message("/wordResponse", word_response)
    td_client.send_message("/message", f"Message received: '{sentence}'")
def save_string_to_csv(file_path, string_to_save):
    """Append string_to_save as a single-column row to the CSV at file_path."""
    # Ensure the directory exists
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    # Open the CSV file in append mode, creating it if it doesn't exist.
    # newline='' lets the csv module control line endings; encoding is pinned
    # so output does not depend on the machine's locale.
    with open(file_path, mode='a', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        # Write the string as a new row
        writer.writerow([string_to_save])
def main():
    """
    Main function to set up and run the OSC server.
    """
    args = parse_args()  # was missing: args was referenced but never created
    unity_client = udp_client.SimpleUDPClient(args.unity_ip, args.unity_port)
    td_client = udp_client.SimpleUDPClient(args.td_ip, args.td_port)
    # Build both classifiers once at startup so handlers can reuse them.
    offensive_classifier = pipeline("text-classification", model=Config.OFFENSIVE_MODEL)
    context_classifier = pipeline("zero-shot-classification", model=Config.CONTEXT_MODEL)
    disp = dispatcher.Dispatcher()
    # Each OSC message runs the async handler to completion in a fresh event loop.
    disp.map("/florence_sentence", lambda unused_addr, *osc_args: asyncio.run(
        handle_osc_message(unity_client, td_client, unused_addr,
                           offensive_classifier, context_classifier,
                           *osc_args)))
    server = osc_server.ThreadingOSCUDPServer((args.local_ip, args.local_port), disp)
    print(f"Serving on {server.server_address}")
    server.serve_forever()  # block and dispatch incoming OSC messages


if __name__ == "__main__":
    main()
And this one:
import random
import argparse
import asyncio
import csv
import os

import nltk
from transformers import pipeline
from pythonosc import dispatcher, osc_server, udp_client

# Tell NLTK where its data (tokenizers, POS tagger models) lives on this machine.
nltk.data.path.append('C:/Users/Florence Nightingale/AppData/Roaming/nltk_data')

# Module-level message counter forwarded with every classified sentence.
new_message = 0
# CSV log of every sentence received, with its classification and word response.
file_path = r'C:\Users\Florence Nightingale\Documents\Florence Nightingale Living Portrait\InputHistory.csv'
class Config:
    """Default network addresses, ports, model names, and classification labels."""
    LOCAL_IP = "192.168.0.2"
    TABLET_IP = "192.168.0.3"
    LOCAL_PORT = 8000
    TD_PORT = 9000
    TABLET_PORT = 10000
    OFFENSIVE_MODEL = "unitary/toxic-bert"
    CONTEXT_MODEL = "facebook/bart-large-mnli"
    ZERO_SHOT_LABELS = ["angel", "hero", "feminist", "rebel"]
class FlorenceAttributes:
    """Store and manipulate sentence classification attributes."""

    LABELS = Config.ZERO_SHOT_LABELS

    def __init__(self, primary=None, secondary=None, tertiary=None, quaternary=None, primary_value=None,
                 secondary_value=None, tertiary_value=None, quaternary_value=None, ambiguous=False):
        self.primary = primary        # was missing: attribute never assigned
        self.secondary = secondary
        self.tertiary = tertiary      # was missing: attribute never assigned
        self.quaternary = quaternary
        self.primary_value = primary_value
        self.secondary_value = secondary_value
        self.tertiary_value = tertiary_value
        self.quaternary_value = quaternary_value
        self.ambiguous = ambiguous

    def __repr__(self):
        """Return a string representation of the attributes."""
        return (f'PRIMARY: "{self.primary}" = {self.primary_value}, '
                f'SECONDARY: "{self.secondary}" = {self.secondary_value}, '
                f'TERTIARY: "{self.tertiary}" = {self.tertiary_value}, '
                f'QUATERNARY: "{self.quaternary}" = {self.quaternary_value}')

    def has_null_labels(self):
        """Return True if any label is empty or the string 'None'."""
        return any(label in ("", "None") for label in [self.primary, self.secondary, self.tertiary, self.quaternary])

    def has_null_values(self):
        """Return True if any value is 0 or None."""
        return any(value in (0, None) for value in [self.primary_value, self.secondary_value, self.tertiary_value, self.quaternary_value])

    def fill_missing_labels(self):
        """Fill empty label slots with distinct unused labels from LABELS."""
        used_labels = {label for label in [self.primary, self.secondary, self.tertiary, self.quaternary] if label}
        free_labels = [label for label in self.LABELS if label not in used_labels]
        # Shuffle once and pop so each slot gets a distinct label (random.choice
        # without removal could previously assign the same label twice).
        random.shuffle(free_labels)
        if not self.primary and free_labels:
            self.primary = free_labels.pop()
        if not self.secondary and free_labels:
            self.secondary = free_labels.pop()
        if not self.tertiary and free_labels:
            self.tertiary = free_labels.pop()
        if not self.quaternary and free_labels:
            self.quaternary = free_labels.pop()

    def reorganize_data(self):
        """Format the label/value pairs for display, plus the message counter."""
        labels_values = zip([self.primary, self.secondary, self.tertiary, self.quaternary],
                            [self.primary_value, self.secondary_value, self.tertiary_value, self.quaternary_value])
        result = [f"{label}: {value}" for label, value in labels_values if label in self.LABELS and value is not None]
        result.append(f"/new_message {new_message}")
        # A return was missing; the exact original display format is unrecoverable
        # from the paste — TODO confirm the expected string layout.
        return "\n".join(result)

    def generate_final_message(self, word_response):
        """Build the summary string sent back to the Unity tablet."""
        return (f"{self.primary} = {self.primary_value}, "
                f"{self.secondary} = {self.secondary_value}, "
                f"{self.tertiary} = {self.tertiary_value}, "
                f"{self.quaternary} = {self.quaternary_value}, "
                f"newMessage = {new_message}, "
                f"wordResponse = {word_response}")
def parse_args():
    """Parse command-line arguments, defaulting to the Config values."""
    parser = argparse.ArgumentParser(description="OSC Server for handling Unity messages")
    parser.add_argument("--local_ip", default=Config.LOCAL_IP, help="The IP of the python script")
    # type=int so a port passed on the command line is not left as a string.
    parser.add_argument("--local_port", type=int, default=Config.LOCAL_PORT, help="The port of the python script")
    # NOTE(review): td_ip defaults to LOCAL_IP — presumably Touch Designer runs on
    # the same machine; confirm.
    parser.add_argument("--td_ip", default=Config.LOCAL_IP, help="The IP of the Touch Designer app")
    parser.add_argument("--td_port", type=int, default=Config.TD_PORT, help="The port of the Touch Designer app")
    parser.add_argument("--unity_ip", default=Config.TABLET_IP, help="The IP of the Unity client")
    parser.add_argument("--unity_port", type=int, default=Config.TABLET_PORT, help="The port of the Unity client")
    return parser.parse_args()
def is_offensive(word, offensive_classifier):
    """Return True if the classifier scores the word as 'toxic' above 0.5."""
    result = offensive_classifier(word)
    # The classifier returns a list of {label, score} dicts.
    offensive = any(res['label'] == 'toxic' and res['score'] > 0.5 for res in result)
    print(f"Checking if word '{word}' is offensive: {result}")
    return offensive
def is_contextually_relevant(word, context_classifier):
    """Return True if any zero-shot label score for the word exceeds 0.3."""
    result = context_classifier(word, candidate_labels=Config.ZERO_SHOT_LABELS)
    relevant = any(score > 0.3 for score in result['scores'])  # Adjusted threshold for relevance
    print(f"Checking if word '{word}' is contextually relevant: {result}")
    return relevant
def extract_word_response(sentence, offensive_classifier, context_classifier):
    """Extract a contextually relevant, non-offensive word from the sentence, or ' '."""
    words = nltk.word_tokenize(sentence)
    tagged_words = nltk.pos_tag(words)
    print(f"Tagged words: {tagged_words}")
    # Keep adjectives and (proper) nouns that the toxicity model does not flag.
    relevant_words = [word for word, pos in tagged_words if
                      pos in ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS')
                      and not is_offensive(word, offensive_classifier)]
    print(f"Relevant Adjectives, Proper Nouns, and Nouns: {relevant_words}")
    for word in relevant_words:
        if is_contextually_relevant(word, context_classifier):
            print(f"Chosen word: {word}")
            return word  # was missing: the chosen word was printed but never returned
    return " "  # Return blank if no relevant word is found or if it's offensive
def analyze_sentence(sentence):
    """Score the sentence against the zero-shot labels; return {label: int percent}."""
    # NOTE(review): building the pipeline on every call is expensive; consider
    # reusing the context_classifier created in main().
    classifier = pipeline("zero-shot-classification", model=Config.CONTEXT_MODEL)
    result = classifier(sentence, candidate_labels=Config.ZERO_SHOT_LABELS)
    # Scale the 0..1 float scores to integer percentages.
    weightings = {label: int(score * 100) for label, score in zip(result['labels'], result['scores'])}
    return weightings
async def classify_sentence(sentence, offensive_classifier, context_classifier):
    """
    Classify a sentence, extract a relevant word, and build the final message.

    Returns:
        tuple: (final message, FlorenceAttributes, message counter, word response)
    """
    # Rank the zero-shot labels for this sentence, highest weight first.
    weightings = analyze_sentence(sentence)
    sorted_weightings = sorted(weightings.items(), key=lambda item: item[1], reverse=True)
    primary, primary_value = sorted_weightings[0]
    secondary, secondary_value = sorted_weightings[1]
    tertiary, tertiary_value = sorted_weightings[2]
    quaternary, quaternary_value = sorted_weightings[3]
    attr1 = FlorenceAttributes(
        primary=primary,
        secondary=secondary,
        tertiary=tertiary,
        quaternary=quaternary,
        primary_value=primary_value,
        secondary_value=secondary_value,
        tertiary_value=tertiary_value,
        quaternary_value=quaternary_value,
    )
    # Extract an important word to be used as a typographic element.
    word_response = extract_word_response(sentence, offensive_classifier, context_classifier)
    print(f"Weightings: {weightings}, Word response: {word_response}")
    return str(attr1.generate_final_message(word_response)).strip(), attr1, new_message, word_response
async def handle_osc_message(unity_client, td_client, unused_addr, offensive_classifier, context_classifier, sentence):
    """Classify an incoming OSC sentence, log it, and forward results to Unity and TD."""
    print("I received message from Unity")
    response, attribute, new_msg, word_response = await classify_sentence(sentence, offensive_classifier, context_classifier)
    print(f"Received response from classify_sentence: {response}, {attribute}, {new_msg}, {word_response}")
    # Persist every interaction for later review.
    save_string_to_csv(file_path, f"{sentence} , {attribute}, {word_response}")
    unity_client.send_message("/response", response)
    # Fan the four label weights out to Touch Designer as individual OSC addresses.
    td_client.send_message(f"/{attribute.primary}", attribute.primary_value)
    td_client.send_message(f"/{attribute.secondary}", attribute.secondary_value)
    td_client.send_message(f"/{attribute.tertiary}", attribute.tertiary_value)
    td_client.send_message(f"/{attribute.quaternary}", attribute.quaternary_value)
    td_client.send_message("/newMessage", new_msg)
    td_client.send_message("/wordResponse", word_response)
    td_client.send_message("/message", f"Message received: '{sentence}'")
def save_string_to_csv(file_path, string_to_save):
    """Append string_to_save as a single-column row to the CSV at file_path."""
    # Ensure the directory exists before the first write.
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    # newline='' lets the csv module control line endings; encoding is pinned
    # so output does not depend on the machine's locale.
    with open(file_path, mode='a', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow([string_to_save])
def main():
    """Set up the OSC clients, classifiers, and dispatcher, then serve forever."""
    args = parse_args()
    unity_client = udp_client.SimpleUDPClient(args.unity_ip, args.unity_port)
    td_client = udp_client.SimpleUDPClient(args.td_ip, args.td_port)
    # Build both classifiers once at startup so handlers can reuse them.
    offensive_classifier = pipeline("text-classification", model=Config.OFFENSIVE_MODEL)
    context_classifier = pipeline("zero-shot-classification", model=Config.CONTEXT_MODEL)
    disp = dispatcher.Dispatcher()
    # Each OSC message runs the async handler to completion in a fresh event loop.
    disp.map("/florence_sentence", lambda unused_addr, *osc_args: asyncio.run(
        handle_osc_message(unity_client, td_client, unused_addr,
                           offensive_classifier, context_classifier, *osc_args)))
    server = osc_server.ThreadingOSCUDPServer((args.local_ip, args.local_port), disp)
    print(f"Serving on {server.server_address}")
    server.serve_forever()  # block and dispatch incoming OSC messages


if __name__ == "__main__":
    main()