Trials (2)

Source: demos/trial_2

This demo follows on from the previous Trial demo. Its key feature is programmatically generating audio stimuli. Instead of manually creating a folder of audio stimuli in advance, the experimenter instead defines a custom function, in this case synth_prosody, which is called to generate stimuli.

The stimulus set is specified in the form of a list of Nodes. The Node is a core concept in PsyNet that typically corresponds to some kind of ‘stimulus generator’. We define a collection of Nodes using a so-called list comprehension. List comprehensions are a special feature of Python that are really great for creating exhaustive combinations of experimental parameters. For example, the following part of the list comprehension makes sure that Nodes are created in all combinations of five frequency gradients and three start frequencies.

for frequency_gradient in [-100, -50, 0, 50, 100]
for start_frequency in [-100, 0, 100]

Each Node is linked to a Cached Function Asset. Assets correspond to files that are managed by PsyNet. A Function Asset is an Asset that is generated by a function; a Cached Function Asset is a Function Asset whose results are cached to avoid unnecessary resource usage. Typically the outputs will be stored on a web server and the caching will serve to avoid running the functions and uploading the files, both of which can otherwise be time-consuming. When you run an experiment, PsyNet will automatically check the status of these Assets and perform any computations or uploads that are necessary.

Source: demos/trial_2/experiment.py

import random

import psynet.experiment
from psynet.asset import CachedFunctionAsset, LocalStorage, S3Storage  # noqa
from psynet.bot import Bot
from psynet.consent import NoConsent
from psynet.modular_page import AudioPrompt, ModularPage, PushButtonControl
from psynet.page import SuccessfulEndPage
from psynet.timeline import Module, Timeline, for_loop
from psynet.trial import Node, Trial

from .custom_synth import synth_prosody

def synth_stimulus(path, frequencies):
    """Generate a single audio stimulus file.

    Thin adapter that forwards to :func:`synth_prosody`, mapping the
    ``(path, frequencies)`` argument pair onto that function's
    ``(output_path, vector)`` parameters.
    """
    synth_prosody(output_path=path, vector=frequencies)

            "frequency_gradient": frequency_gradient,
            "start_frequency": start_frequency,
            "frequencies": [start_frequency + i * frequency_gradient for i in range(5)],
            "stimulus": CachedFunctionAsset(
    for frequency_gradient in [-100, -50, 0, 50, 100]
    for start_frequency in [-100, 0, 100]

class RateTrial(Trial):
    # Estimated duration of one trial in seconds; PsyNet uses this for
    # progress and payment estimation.
    time_estimate = 5

    def show_trial(self, experiment, participant):
        """Play the node's audio stimulus and collect a happiness rating.

        NOTE(review): the ModularPage construction lines were lost in
        extraction; reconstructed from the visible fragments (prompt text and
        push-button choices) and the PsyNet modular-page API — confirm
        against the original demo source.
        """
        return ModularPage(
            "rate_trial",
            AudioPrompt(
                self.assets["stimulus"],
                text="How happy is the following word?",
            ),
            PushButtonControl(
                ["Not at all", "A little", "Very much"],
            ),
        )

# A timeline module that delivers 5 randomly sampled nodes from the stimulus
# set to each participant, cueing a RateTrial for each one.
#
# NOTE(review): the Module/for_loop wrapper lines were lost in extraction;
# reconstructed from the visible keyword arguments and the PsyNet timeline
# API — confirm against the original demo source.
audio_ratings = Module(
    "audio_ratings",
    for_loop(
        label="Deliver 5 random samples from the stimulus set",
        iterate_over=lambda nodes: random.sample(nodes, 5),
        logic=lambda node: RateTrial.cue(node),
        time_estimate_per_iteration=RateTrial.time_estimate,
    ),
    nodes=nodes,
)

class Exp(psynet.experiment.Experiment):
    label = "Simple trial demo (2)"

    # Store generated assets on the local filesystem; swap in the commented
    # S3Storage line to host them on Amazon S3 instead.
    asset_storage = LocalStorage()
    # asset_storage = S3Storage("psynet-tests", "static-audio")

    # NOTE(review): the Timeline contents were lost in extraction;
    # reconstructed from the imports visible at the top of the file
    # (NoConsent, SuccessfulEndPage) — confirm against the original source.
    timeline = Timeline(
        NoConsent(),
        audio_ratings,
        SuccessfulEndPage(),
    )

    def test_check_bot(self, bot: Bot, **kwargs):
        # Each bot participant should complete exactly the 5 trials that the
        # for_loop delivers.
        assert len(bot.alive_trials) == 5

Source: demos/trial_2/custom_synth.py

import os

import numpy as np

def synth_prosody(vector, output_path):
    """
    Synthesises a stimulus.

    Parameters
    ----------

    vector : list
        A vector of parameters as produced by the Gibbs sampler,
        for example:

        ::

            [144.11735609, 159.17558762, 232.15967799, 298.43893329, 348.34553954]

    output_path : str
        The output path for the generated file.
    """
    assert len(vector) == 5

    # Five fixed time points paired with the five requested frequencies form
    # a break-point function (time, frequency) for the pitch contour.
    times = np.array([0.0, 0.090453, 0.18091, 0.27136, 0.36181])
    freqs = np.array(vector)
    x = np.column_stack((times, freqs))

    effects = [{"name": "fade-out", "duration": 0.01}]

    # NOTE(review): the call that actually synthesises and writes the file was
    # lost in extraction; reconstructed as a single-item batch call to
    # synth_batch. The baseline audio filename is an assumption — confirm
    # against the original demo source.
    synth_batch(
        [x],
        [output_path],
        baseline_audio_path=os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "stimulus.wav"
        ),
        effects=effects,
    )


def synth_batch(
    BPFs,
    filenames,
    baseline_audio_path,
    prepend_path=None,
    append_path=None,
    reference_tone=235,
    man_step_size=0.01,
    man_min_F0=75,
    man_max_F0=600,
    effects=None,
):
    """
    Create stimuli based on BPFs.

    Parameters
    ----------
    BPFs (list): List of numpy matrices; the first column is time, the second
        column pitch change in cents
    filenames (list): Filenames of synthesized files
    baseline_audio_path (str): Filepath to baseline audio
    prepend_path (str): name of the wav file to prepend to the audio
    append_path (str): name of the wav file to append to the audio
    reference_tone (int): default 235 Hz
    man_step_size (float): The pitch tracking window size
    man_min_F0 (float): The pitch floor
    man_max_F0 (float): The pitch ceiling
    effects (list): List of dictionaries that describe effects applied to the
        baseline_audio_path

    NOTE(review): the parameter list and several call sites in this function
    were lost in extraction and have been reconstructed. The defaults for
    man_step_size / man_min_F0 / man_max_F0 are conventional Praat settings,
    not values recovered from the original — confirm against the original
    demo source.
    """
    # Avoid a shared mutable default argument.
    if effects is None:
        effects = []

    # Third-party imports are kept function-local so the module can be
    # imported without parselmouth/scipy installed.
    from parselmouth import Sound
    from parselmouth.praat import call
    from scipy.io.wavfile import write as write_wav

    # Do some checks
    supported_effects = ["fade-out"]
    if not all(
        ["name" in e.keys() and e["name"] in supported_effects for e in effects]
    ):
        raise ValueError(
            "Your effect must have a name. Currently we only support the following effects: %s"
            % ", ".join(supported_effects)
        )

    if len(BPFs) != len(filenames):
        raise ValueError("Need to be of same length!")

    if append_path is not None and not os.path.exists(append_path):
        raise FileNotFoundError("Specified `append_path` not found on this system")

    if prepend_path is not None and not os.path.exists(prepend_path):
        raise FileNotFoundError("Specified `prepend_path` not found on this system")

    if not os.path.exists(baseline_audio_path):
        raise FileNotFoundError(
            "Specified `baseline_audio_path` not found on this system"
        )

    def cent2herz(ct, base=reference_tone):
        """Converts deviation in cents to a value in Hertz"""
        st = ct / 100
        semi1 = np.log(np.power(2, 1 / 12))
        return np.exp(st * semi1) * base

    # Load the sound
    sound = Sound(baseline_audio_path)
    if prepend_path is not None:
        pre_sound = Sound(prepend_path)

    if append_path is not None:
        app_sound = Sound(append_path)

    # Create a manipulation object
    manipulation = call(sound, "To Manipulation", man_step_size, man_min_F0, man_max_F0)

    # Extract the pitch tier
    pitch_tier = call(manipulation, "Extract pitch tier")

    for BPF_idx, BPF in enumerate(BPFs):
        # Make sure the pitch Tier is empty
        call(pitch_tier, "Remove points between", sound.xmin, sound.xmax)

        # Convert cents to Hertz (mutates the BPF in place)
        BPF[:, 1] = [cent2herz(ct) for ct in BPF[:, 1]]

        # Populate the pitch tier
        for point_idx in range(BPF.shape[0]):
            call(pitch_tier, "Add point", BPF[point_idx, 0], BPF[point_idx, 1])

        # Use it in the manipulation object
        call([pitch_tier, manipulation], "Replace pitch tier")

        # Synthesize it
        synth_main = call(manipulation, "Get resynthesis (overlap-add)")

        # Assuming all effects are applied to the main file
        for effect in effects:
            if effect["name"] == "fade-out":
                if "duration" in effect.keys():
                    # NOTE(review): the arguments to this Praat "Fade out"
                    # call were lost in extraction; reconstructed per the
                    # Praat command signature (channel, time, fade time,
                    # silence flag) — confirm.
                    call(
                        synth_main,
                        "Fade out",
                        1,
                        synth_main.xmax - effect["duration"],
                        effect["duration"],
                        "yes",
                    )

        # Concatenate it
        if prepend_path is not None or append_path is not None:
            sounds = []
            if prepend_path is not None:
                sounds.append(pre_sound)
            sounds.append(synth_main)
            if append_path is not None:
                sounds.append(app_sound)
            synth_main = call(sounds, "Concatenate")

        filepath = filenames[BPF_idx]
        write_wav(filepath, int(synth_main.sampling_frequency), synth_main.values.T)
        # NOTE(review): the original also saved via Praat immediately after
        # write_wav, overwriting the same file; kept for fidelity but one of
        # the two writes is redundant.
        call(synth_main, "Save as WAV file", filepath)