Hi there,
I would like to subset the Allen V1 model and execute just that subset.
I am also hoping to store intermediate simulation configuration files not as Python code but, if possible, as C++ code. Is there a way of storing the connection matrices in a C++-native way?
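To make concrete what I mean by "C++ native": something like dumping the connection triplets to flat little-endian binary files that a C++ program could fread straight into std::vector buffers. A minimal sketch of the writer side (the file layout and names here are my own assumption, not anything NEST provides):

py"""
import numpy as np

def dump_coo_binary(sources, targets, weights, prefix='conns'):
    # Raw little-endian buffers: int64 node ids and float64 weights, so a
    # C++ reader can fread them into std::vector<int64_t> / std::vector<double>.
    np.asarray(sources, dtype='<i8').tofile(prefix + '_src.bin')
    np.asarray(targets, dtype='<i8').tofile(prefix + '_tgt.bin')
    np.asarray(weights, dtype='<f8').tofile(prefix + '_w.bin')
"""

Ideally, though, the kernel itself would write something like this out, which is why I was looking at the nestkernel sources.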
For instance, I have noticed the class https://github.com/nest/nest-simulator/blob/master/nestkernel/connection_man... I think this class may have the functionality I need.
I acquired the config.json file from Dropbox, along with all of the associated HDF5 and CSV files, from here:
https://www.dropbox.com/sh/w5u31m3hq6u2x5m/AAB0B6OLRyLJwxCKjuWrG26pa/simulat...
I have found the nodes in the model pertaining only to layer 2/3 by wrangling data frames in Julia (see the code below). Calling the method extract_subset is slower than I would like. I suspect it would be faster to use SLI or the nestkernel more directly, but I don't know what interventions I would need to make to get the PyNEST instance to dump its intermediate objects in a C++-native way. I wonder if anyone has any hints on how to do this?
For instance, I would like to be able to use the nestkernel CLI to save the network state directly. My Julia/PyCall setup:

using Pkg
using Conda
using PyCall
# PyCall wants the path to the Python executable itself, not the lib directory
ENV["PYTHON"] = joinpath(ENV["HOME"], ".julia/conda/3/bin/python3")
Pkg.build("PyCall")
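A quick check that the rebuild actually bound PyCall to that environment (so that nest and bmtk resolve from it) is to ask Python which interpreter it is running:

py"""
import sys
print(sys.executable)  # should point into ~/.julia/conda/3
"""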
py""" def init_sim_to_call_from_julia():#slice_start,slice_end,nest=None,sim=None) import os, sys import math import numpy as np from bmtk.simulator import pointnet from bmtk.simulator.pointnet.pyfunction_cache import synaptic_weight from bmtk.simulator.pointnet.io_tools import io import nest import h5py try: nest.Install('glifmodule') except Exception as e: pass @synaptic_weight def DirectionRule_others(edges, src_nodes, trg_nodes): src_tuning = src_nodes['tuning_angle'].values tar_tuning = trg_nodes['tuning_angle'].values sigma = edges['weight_sigma'].values nsyn = edges['nsyns'].values syn_weight = edges['syn_weight'].values
delta_tuning_180 = np.abs(np.abs(np.mod(np.abs(tar_tuning - src_tuning), 360.0) - 180.0) - 180.0) w_multiplier_180 = np.exp(-(delta_tuning_180 / sigma) ** 2)
return syn_weight * w_multiplier_180 * nsyn
@synaptic_weight def DirectionRule_EE(edges, src_nodes, trg_nodes): src_tuning = src_nodes['tuning_angle'].values tar_tuning = trg_nodes['tuning_angle'].values x_tar = trg_nodes['x'].values x_src = src_nodes['x'].values z_tar = trg_nodes['z'].values z_src = src_nodes['z'].values sigma = edges['weight_sigma'].values nsyn = edges['nsyns'].values syn_weight = edges['syn_weight'].values
delta_tuning_180 = np.abs(np.abs(np.mod(np.abs(tar_tuning - src_tuning), 360.0) - 180.0) - 180.0) w_multiplier_180 = np.exp(-(delta_tuning_180 / sigma) ** 2)
delta_x = (x_tar - x_src) * 0.07 delta_z = (z_tar - z_src) * 0.04
theta_pref = tar_tuning * (np.pi / 180.) xz = delta_x * np.cos(theta_pref) + delta_z * np.sin(theta_pref) sigma_phase = 1.0 phase_scale_ratio = np.exp(- (xz ** 2 / (2 * sigma_phase ** 2)))
# To account for the 0.07 vs 0.04 dimensions. This ensures the horizontal neurons are scaled by 5.5/4 (from the # midpoint of 4 & 7). Also, ensures the vertical is scaled by 5.5/7. This was a basic linear estimate to get the # numbers (y = ax + b). theta_tar_scale = abs(abs(abs(180.0 - np.mod(np.abs(tar_tuning), 360.0)) - 90.0) - 90.0) phase_scale_ratio = phase_scale_ratio * (5.5 / 4.0 - 11.0 / 1680.0 * theta_tar_scale)
return syn_weight * w_multiplier_180 * phase_scale_ratio * nsyn
def main(config_file): configure = pointnet.Config.from_json(config_file) configure.build_env() graph = pointnet.PointNetwork.from_config(configure) sim = pointnet.PointSimulator.from_config(configure, graph) node_info = list(graph.get_node_populations()) dfv1 = node_info[0].nodes_df() pop_names = { k:v for k,v in zip(dfv1.index,dfv1["pop_name"].values) } dfv1e = dfv1[dfv1["ei"] == "e"].index dfv1i = dfv1[dfv1["ei"] == "i"].index
dfv1e_pop = dfv1[dfv1["ei"] == "e"]["pop_name"].values dfv1i_pop = dfv1[dfv1["ei"] == "i"]["pop_name"].values lgn = node_info[1].nodes_df() lgne = lgn[lgn["ei"] == "e"].index
return nest,sim,node_info,dfv1e,dfv1i,lgne,dfv1e_pop,dfv1i_pop,pop_names #,lgni
nest,sim,node_info,dfv1e,dfv1i,lgne,dfv1e_pop,dfv1i_pop,pop_names = main('config.json') # ,lgni return (nest,sim,node_info,dfv1e,dfv1i,lgne,dfv1e_pop,dfv1i_pop,pop_names)
""" (nest,sim,node_info,dfv1e,dfv1i,lgne,dfv1e_pop,dfv1i_pop,pop_names) = py"init_sim_to_call_from_julia"()
function get_layer_sub_net(target)
    subset = Int64[]
    for (k, v) in pairs(pop_names)
        if v == target
            println(k, v)
            push!(subset, k)
        end
    end
    subset
end
subset0 = get_layer_sub_net("i23Htr3a")
subset1 = get_layer_sub_net("i23Pvalb")
subset2 = get_layer_sub_net("i23Sst")
subset3 = get_layer_sub_net("e23Cux2")

layer_23_ids = vcat(subset0, subset1, subset2, subset3)
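(As an aside, the same selection can be written in one pass as a comprehension; a sketch of the equivalent:)

get_layer_sub_net(target) = Int64[k for (k, v) in pairs(pop_names) if v == target]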
py"""def get_conns_from_py_nest(nest,sim,slice_start,nodes=None): conns = nest.GetConnections(nest.NodeCollection([slice_start])) nodes = nest.NodeCollection([slice_start]) node_ = nodes.get() dict_ = conns.get() return (dict_["source"],dict_["target"],dict_["weight"])
"""
using SparseArrays
import ProgressBars

function extract_subset(nest, sim, subset)
    N = 230924 + 1  # + 17400  # 230901
    exc_matrix = spzeros(N, N)
    @inbounds for srcv in ProgressBars.ProgressBar(subset)
        result = py"get_conns_from_py_nest"(nest, sim, srcv)
        @inbounds for (x, y, z) in zip(Vector{Int64}(result[1]), Vector{Int64}(result[2]), Vector{Float64}(result[3]))
            exc_matrix[y, x] = z  # rows are targets, columns are sources
        end
    end
    exc_matrix
end

exc_matrix = extract_subset(nest, sim, layer_23_ids)
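One idea I have been meaning to try, in case it is the right direction: issue a single GetConnections call for all source nodes at once, so the Julia/Python/kernel round trip happens once rather than once per node. A sketch (get_conns_batched is my own name; NodeCollection needs sorted, unique ids):

py"""
import numpy as np

def get_conns_batched(nest, node_ids):
    # One GetConnections call for every source node at once.
    sources = nest.NodeCollection(sorted(set(int(i) for i in node_ids)))
    conns = nest.GetConnections(source=sources)
    data = conns.get(['source', 'target', 'weight'])
    # .get() returns scalars when there is a single connection; normalise.
    return (np.atleast_1d(data['source']),
            np.atleast_1d(data['target']),
            np.atleast_1d(data['weight']))
"""

src, tgt, w = py"get_conns_batched"(nest, layer_23_ids)
N = 230924 + 1
exc_matrix = sparse(Vector{Int64}(tgt), Vector{Int64}(src), Vector{Float64}(w), N, N)

This would also assemble the sparse matrix from the triplets in one shot instead of setting entries one at a time, but I have not profiled it against the loop above.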
Thanks for your help :)
Russell.