# NOTE(review): this file reads as a console-history dump; the lines below are
# the tail of an earlier session and reference objects (fit_day2, df_day2,
# max_time_day1, min_time_day1, max_time_day2, p_value_acrophase, ...) that
# are not defined anywhere in the visible file -- they will error if the file
# is run top-to-bottom.
min_time_day2 <- df_day2$time[which.min(predict(fit_day2))] + 24
# Store results in a summary data frame
activity_summary <- data.frame(Day = c("Day1", "Day2"),
MaxTime = c(max_time_day1, max_time_day2),
MinTime = c(min_time_day1, min_time_day2))
# Print results
print(activity_summary)
print(p_value_acrophase)
# Interactive inspection of intermediate objects (auto-printed at the console)
df_day1
df_day2
plot_cos_day1
# Start of a fresh session: clear the workspace
rm(list = ls())
#### packages to load ----
library("cosinor")
library("dplyr")
library("ggplot2")
library("gridExtra")
#### functions ----
# Assign evenly spaced decimal-hour timestamps to a series of observations.
#
# Generalized: the recording length is now a parameter instead of a
# hard-coded 48; `total_hours` defaults to 48 so existing callers are
# unaffected.
#
# Args:
#   num_rows    Number of observations (rows) to time-stamp.
#   total_hours Length of the recording in hours (default 48).
# Returns:
#   Numeric vector of length `num_rows`, evenly spaced from 0 to
#   `total_hours` (both endpoints included when `num_rows > 1`).
assign_decimal_time <- function(num_rows, total_hours = 48) {
  # seq(..., length.out = n) yields exactly n evenly spaced points
  seq(0, total_hours, length.out = num_rows)
}
# main
# NOTE(review): setwd() to an absolute user path is non-portable; kept so the
# rewrite behaves exactly like the original session.
setwd('C://Users//guyze//OneDrive//Documents//Tunicate//may_2023_behavior//48_H_after_spikeSorting')
#### behavior_dlc_analysis ----
# Per-cluster spike tables for this recording.
c1 <- read.csv('spike_plot_data_cluster_1_29.csv')
c2 <- read.csv('spike_plot_data_cluster_2_29.csv')
c3 <- read.csv('spike_plot_data_cluster_3_29.csv')
c4 <- read.csv('spike_plot_data_cluster_4_29.csv')
# Stack all clusters, keeping the spike counts (Y) and cluster labels (X).
df <- do.call(rbind, list(c1, c2, c3, c4))
df <- data.frame(Y = df[["SpikeData"]], X = df[["Cluster"]])
# Empty containers populated by the per-cluster loop that follows.
fit_list <- p_value_acrophase <- list()
data_plots_list_d1 <- data_plots_list_d2 <- list()
clusters <- unique(df[["X"]])
# For each cluster: split the 48 h recording into two 24 h days, fit a
# 24 h-period cosinor model to each day, store fits / acrophase p-values,
# and plot the raw points with both fitted curves.
for (i in seq_along(clusters)) {
# Filter data for the current cluster
df_pair <- df |> filter(X == clusters[i])
# Evenly spaced decimal-hour timestamps across the 48 h recording
df_pair$time <- assign_decimal_time(nrow(df_pair))
# Split data into two 24-hour segments
df_day1 <- df_pair |> filter(time <= 24)
# Day 2 is re-based to 0-24 h so both days share the same cosinor phase axis
df_day2 <- df_pair |> filter(time > 24) |> mutate(time = time - 24)
# Perform cosinor analysis for each day segment with a 24-hour period
fit_day1 <- cosinor.lm(Y ~ time(time), data = df_day1, period = 24)
fit_day2 <- cosinor.lm(Y ~ time(time), data = df_day2, period = 24)
# Save fits and p-values
fit_list[[paste(i, "day1", sep = "_")]] <- fit_day1
fit_list[[paste(i, "day2", sep = "_")]] <- fit_day2
# Row 3 of cosinor's transformed coefficient table is assumed to be the
# acrophase term -- TODO confirm against summary(fit)$transformed.table
p_value_acrophase[[paste(i, "day1", sep = "_")]] <- summary(fit_day1)$transformed.table$p.value[3]
p_value_acrophase[[paste(i, "day2", sep = "_")]] <- summary(fit_day2)$transformed.table$p.value[3]
# Generate plot data
plot_cos_day1 <- ggplot_cosinor.lm(fit_day1)
plot_cos_day2 <- ggplot_cosinor.lm(fit_day2)
# Extract the fitted-curve data (time, Y.hat) from the cosinor ggplot object
data_plots_list_d1[[paste(i, "day1", sep = "_")]] <- data.frame(
Time = plot_cos_day1$data$time,
Y_hat = plot_cos_day1$data$Y.hat,
ID = rep(i, length(plot_cos_day1$data$Y.hat))
)
# Day-2 curve shifted forward by 24 h so it plots after day 1
data_plots_list_d2[[paste(i, "day2", sep = "_")]] <- data.frame(
Time = plot_cos_day2$data$time + 24,
Y_hat = plot_cos_day2$data$Y.hat,
ID = rep(i, length(plot_cos_day2$data$Y.hat))
)
# Combine both plots for a complete view
pp <- ggplot() +
geom_point(data = df_pair, aes(x = time, y = Y), size = 1.5) +
geom_line(data = data_plots_list_d1[[paste(i, "day1", sep = "_")]], aes(x = Time, y = Y_hat), color = "blue", size = 1.5) +
geom_line(data = data_plots_list_d2[[paste(i, "day2", sep = "_")]], aes(x = Time, y = Y_hat), color = "red", size = 1.5) +
theme_minimal() +
labs(x = "Time (Hours)", y = "Spike count (#)") +
theme(
legend.position = "none",
text = element_text(size = 10),
axis.title = element_text(size = 37),
axis.text = element_text(size = 37),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# NOTE(review): `size` for lines is deprecated in ggplot2 >= 3.4 in favour
# of `linewidth` -- emits a warning on modern ggplot2 but still works
axis.line = element_line(colour = "black", size = 1, linetype = "solid"),
plot.margin = margin(t = 40, r = 40, b = 40, l = 40)
)
print(pp)
}
# End of the previous run; clear the workspace for a fresh session
rm(list = ls())
#### packages to load ----
library("cosinor")
library("dplyr")
library("ggplot2")
library("gridExtra")
#### functions ----
assign_decimal_time <- function(num_rows) {
  # Evenly spaced decimal-hour timestamps covering the 48 h recording,
  # one timestamp per observation row.
  seq(from = 0, to = 48, length.out = num_rows)
}
# main
# NOTE(review): setwd() to an absolute user path is non-portable; kept so the
# rewrite behaves exactly like the original session.
setwd('C://Users//guyze//OneDrive//Documents//Tunicate//may_2023_behavior//48_H_after_spikeSorting')
#### behavior_dlc_analysis ----
# Per-cluster spike tables for this recording.
c1 <- read.csv('spike_plot_data_cluster_1_11.csv')
c2 <- read.csv('spike_plot_data_cluster_2_11.csv')
c3 <- read.csv('spike_plot_data_cluster_3_11.csv')
c4 <- read.csv('spike_plot_data_cluster_4_11.csv')
# Stack all clusters, keeping the spike counts (Y) and cluster labels (X).
df <- do.call(rbind, list(c1, c2, c3, c4))
df <- data.frame(Y = df[["SpikeData"]], X = df[["Cluster"]])
# Empty containers populated by the per-cluster loop that follows.
fit_list <- p_value_acrophase <- list()
data_plots_list_d1 <- data_plots_list_d2 <- list()
clusters <- unique(df[["X"]])
# For each cluster: split the 48 h recording into two 24 h days, fit a
# 24 h-period cosinor model to each day, store fits / acrophase p-values,
# and plot the raw points with both fitted curves.
for (i in seq_along(clusters)) {
# Filter data for the current cluster
df_pair <- df |> filter(X == clusters[i])
# Evenly spaced decimal-hour timestamps across the 48 h recording
df_pair$time <- assign_decimal_time(nrow(df_pair))
# Split data into two 24-hour segments
df_day1 <- df_pair |> filter(time <= 24)
# Day 2 is re-based to 0-24 h so both days share the same cosinor phase axis
df_day2 <- df_pair |> filter(time > 24) |> mutate(time = time - 24)
# Perform cosinor analysis for each day segment with a 24-hour period
fit_day1 <- cosinor.lm(Y ~ time(time), data = df_day1, period = 24)
fit_day2 <- cosinor.lm(Y ~ time(time), data = df_day2, period = 24)
# Save fits and p-values
fit_list[[paste(i, "day1", sep = "_")]] <- fit_day1
fit_list[[paste(i, "day2", sep = "_")]] <- fit_day2
# Row 3 of cosinor's transformed coefficient table is assumed to be the
# acrophase term -- TODO confirm against summary(fit)$transformed.table
p_value_acrophase[[paste(i, "day1", sep = "_")]] <- summary(fit_day1)$transformed.table$p.value[3]
p_value_acrophase[[paste(i, "day2", sep = "_")]] <- summary(fit_day2)$transformed.table$p.value[3]
# Generate plot data
plot_cos_day1 <- ggplot_cosinor.lm(fit_day1)
plot_cos_day2 <- ggplot_cosinor.lm(fit_day2)
# Extract the fitted-curve data (time, Y.hat) from the cosinor ggplot object
data_plots_list_d1[[paste(i, "day1", sep = "_")]] <- data.frame(
Time = plot_cos_day1$data$time,
Y_hat = plot_cos_day1$data$Y.hat,
ID = rep(i, length(plot_cos_day1$data$Y.hat))
)
# Day-2 curve shifted forward by 24 h so it plots after day 1
data_plots_list_d2[[paste(i, "day2", sep = "_")]] <- data.frame(
Time = plot_cos_day2$data$time + 24,
Y_hat = plot_cos_day2$data$Y.hat,
ID = rep(i, length(plot_cos_day2$data$Y.hat))
)
# Combine both plots for a complete view
pp <- ggplot() +
geom_point(data = df_pair, aes(x = time, y = Y), size = 1.5) +
geom_line(data = data_plots_list_d1[[paste(i, "day1", sep = "_")]], aes(x = Time, y = Y_hat), color = "blue", size = 1.5) +
geom_line(data = data_plots_list_d2[[paste(i, "day2", sep = "_")]], aes(x = Time, y = Y_hat), color = "red", size = 1.5) +
theme_minimal() +
labs(x = "Time (Hours)", y = "Spike count (#)") +
theme(
legend.position = "none",
text = element_text(size = 10),
axis.title = element_text(size = 37),
axis.text = element_text(size = 37),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# NOTE(review): `size` for lines is deprecated in ggplot2 >= 3.4 in favour
# of `linewidth` -- emits a warning on modern ggplot2 but still works
axis.line = element_line(colour = "black", size = 1, linetype = "solid"),
plot.margin = margin(t = 40, r = 40, b = 40, l = 40)
)
print(pp)
}
# End of the previous run; clear the workspace for a fresh session
rm(list = ls())
#### packages to load ----
library("cosinor")
library("dplyr")
library("ggplot2")
library("gridExtra")
#### functions ----
assign_decimal_time <- function(num_rows) {
  # Evenly spaced decimal-hour timestamps covering the 48 h recording,
  # one timestamp per observation row.
  seq(from = 0, to = 48, length.out = num_rows)
}
# main
# NOTE(review): setwd() to an absolute user path is non-portable; kept so the
# rewrite behaves exactly like the original session.
setwd('C://Users//guyze//OneDrive//Documents//Tunicate//may_2023_behavior//48_H_after_spikeSorting')
#### behavior_dlc_analysis ----
# Per-cluster spike tables for this recording.
c1 <- read.csv('spike_plot_data_cluster_1_18.csv')
c2 <- read.csv('spike_plot_data_cluster_2_18.csv')
c3 <- read.csv('spike_plot_data_cluster_3_18.csv')
c4 <- read.csv('spike_plot_data_cluster_4_18.csv')
# Stack all clusters, keeping the spike counts (Y) and cluster labels (X).
df <- do.call(rbind, list(c1, c2, c3, c4))
df <- data.frame(Y = df[["SpikeData"]], X = df[["Cluster"]])
# Empty containers populated by the per-cluster loop that follows.
fit_list <- p_value_acrophase <- list()
data_plots_list_d1 <- data_plots_list_d2 <- list()
clusters <- unique(df[["X"]])
# For each cluster: split the 48 h recording into two 24 h days, fit a
# 24 h-period cosinor model to each day, store fits / acrophase p-values,
# and plot the raw points with both fitted curves.
for (i in seq_along(clusters)) {
# Filter data for the current cluster
df_pair <- df |> filter(X == clusters[i])
# Evenly spaced decimal-hour timestamps across the 48 h recording
df_pair$time <- assign_decimal_time(nrow(df_pair))
# Split data into two 24-hour segments
df_day1 <- df_pair |> filter(time <= 24)
# Day 2 is re-based to 0-24 h so both days share the same cosinor phase axis
df_day2 <- df_pair |> filter(time > 24) |> mutate(time = time - 24)
# Perform cosinor analysis for each day segment with a 24-hour period
fit_day1 <- cosinor.lm(Y ~ time(time), data = df_day1, period = 24)
fit_day2 <- cosinor.lm(Y ~ time(time), data = df_day2, period = 24)
# Save fits and p-values
fit_list[[paste(i, "day1", sep = "_")]] <- fit_day1
fit_list[[paste(i, "day2", sep = "_")]] <- fit_day2
# Row 3 of cosinor's transformed coefficient table is assumed to be the
# acrophase term -- TODO confirm against summary(fit)$transformed.table
p_value_acrophase[[paste(i, "day1", sep = "_")]] <- summary(fit_day1)$transformed.table$p.value[3]
p_value_acrophase[[paste(i, "day2", sep = "_")]] <- summary(fit_day2)$transformed.table$p.value[3]
# Generate plot data
plot_cos_day1 <- ggplot_cosinor.lm(fit_day1)
plot_cos_day2 <- ggplot_cosinor.lm(fit_day2)
# Extract the fitted-curve data (time, Y.hat) from the cosinor ggplot object
data_plots_list_d1[[paste(i, "day1", sep = "_")]] <- data.frame(
Time = plot_cos_day1$data$time,
Y_hat = plot_cos_day1$data$Y.hat,
ID = rep(i, length(plot_cos_day1$data$Y.hat))
)
# Day-2 curve shifted forward by 24 h so it plots after day 1
data_plots_list_d2[[paste(i, "day2", sep = "_")]] <- data.frame(
Time = plot_cos_day2$data$time + 24,
Y_hat = plot_cos_day2$data$Y.hat,
ID = rep(i, length(plot_cos_day2$data$Y.hat))
)
# Combine both plots for a complete view
pp <- ggplot() +
geom_point(data = df_pair, aes(x = time, y = Y), size = 1.5) +
geom_line(data = data_plots_list_d1[[paste(i, "day1", sep = "_")]], aes(x = Time, y = Y_hat), color = "blue", size = 1.5) +
geom_line(data = data_plots_list_d2[[paste(i, "day2", sep = "_")]], aes(x = Time, y = Y_hat), color = "red", size = 1.5) +
theme_minimal() +
labs(x = "Time (Hours)", y = "Spike count (#)") +
theme(
legend.position = "none",
text = element_text(size = 10),
axis.title = element_text(size = 37),
axis.text = element_text(size = 37),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# NOTE(review): `size` for lines is deprecated in ggplot2 >= 3.4 in favour
# of `linewidth` -- emits a warning on modern ggplot2 but still works
axis.line = element_line(colour = "black", size = 1, linetype = "solid"),
plot.margin = margin(t = 40, r = 40, b = 40, l = 40)
)
print(pp)
}
# End of the previous run; clear the workspace for a fresh session
rm(list = ls())
#### packages to load ----
library("cosinor")
library("dplyr")
library("ggplot2")
library("gridExtra")
#### functions ----
assign_decimal_time <- function(num_rows) {
  # Evenly spaced decimal-hour timestamps covering the 48 h recording,
  # one timestamp per observation row.
  seq(from = 0, to = 48, length.out = num_rows)
}
# main
# NOTE(review): setwd() to an absolute user path is non-portable; kept so the
# rewrite behaves exactly like the original session.
setwd('C://Users//guyze//OneDrive//Documents//Tunicate//may_2023_behavior//48_H_after_spikeSorting')
#### behavior_dlc_analysis ----
# Per-cluster spike tables for this recording.
c1 <- read.csv('spike_plot_data_cluster_1_11.csv')
c2 <- read.csv('spike_plot_data_cluster_2_11.csv')
c3 <- read.csv('spike_plot_data_cluster_3_11.csv')
c4 <- read.csv('spike_plot_data_cluster_4_11.csv')
# Stack all clusters, keeping the spike counts (Y) and cluster labels (X).
df <- do.call(rbind, list(c1, c2, c3, c4))
df <- data.frame(Y = df[["SpikeData"]], X = df[["Cluster"]])
# Empty containers populated by the per-cluster loop that follows.
fit_list <- p_value_acrophase <- list()
data_plots_list_d1 <- data_plots_list_d2 <- list()
clusters <- unique(df[["X"]])
# For each cluster: split the 48 h recording into two 24 h days, fit a
# 24 h-period cosinor model to each day, store fits / acrophase p-values,
# and plot the raw points with both fitted curves.
for (i in seq_along(clusters)) {
# Filter data for the current cluster
df_pair <- df |> filter(X == clusters[i])
# Evenly spaced decimal-hour timestamps across the 48 h recording
df_pair$time <- assign_decimal_time(nrow(df_pair))
# Split data into two 24-hour segments
df_day1 <- df_pair |> filter(time <= 24)
# Day 2 is re-based to 0-24 h so both days share the same cosinor phase axis
df_day2 <- df_pair |> filter(time > 24) |> mutate(time = time - 24)
# Perform cosinor analysis for each day segment with a 24-hour period
fit_day1 <- cosinor.lm(Y ~ time(time), data = df_day1, period = 24)
fit_day2 <- cosinor.lm(Y ~ time(time), data = df_day2, period = 24)
# Save fits and p-values
fit_list[[paste(i, "day1", sep = "_")]] <- fit_day1
fit_list[[paste(i, "day2", sep = "_")]] <- fit_day2
# Row 3 of cosinor's transformed coefficient table is assumed to be the
# acrophase term -- TODO confirm against summary(fit)$transformed.table
p_value_acrophase[[paste(i, "day1", sep = "_")]] <- summary(fit_day1)$transformed.table$p.value[3]
p_value_acrophase[[paste(i, "day2", sep = "_")]] <- summary(fit_day2)$transformed.table$p.value[3]
# Generate plot data
plot_cos_day1 <- ggplot_cosinor.lm(fit_day1)
plot_cos_day2 <- ggplot_cosinor.lm(fit_day2)
# Extract the fitted-curve data (time, Y.hat) from the cosinor ggplot object
data_plots_list_d1[[paste(i, "day1", sep = "_")]] <- data.frame(
Time = plot_cos_day1$data$time,
Y_hat = plot_cos_day1$data$Y.hat,
ID = rep(i, length(plot_cos_day1$data$Y.hat))
)
# Day-2 curve shifted forward by 24 h so it plots after day 1
data_plots_list_d2[[paste(i, "day2", sep = "_")]] <- data.frame(
Time = plot_cos_day2$data$time + 24,
Y_hat = plot_cos_day2$data$Y.hat,
ID = rep(i, length(plot_cos_day2$data$Y.hat))
)
# Combine both plots for a complete view
pp <- ggplot() +
geom_point(data = df_pair, aes(x = time, y = Y), size = 1.5) +
geom_line(data = data_plots_list_d1[[paste(i, "day1", sep = "_")]], aes(x = Time, y = Y_hat), color = "blue", size = 1.5) +
geom_line(data = data_plots_list_d2[[paste(i, "day2", sep = "_")]], aes(x = Time, y = Y_hat), color = "red", size = 1.5) +
theme_minimal() +
labs(x = "Time (Hours)", y = "Spike count (#)") +
theme(
legend.position = "none",
text = element_text(size = 10),
axis.title = element_text(size = 37),
axis.text = element_text(size = 37),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# NOTE(review): `size` for lines is deprecated in ggplot2 >= 3.4 in favour
# of `linewidth` -- emits a warning on modern ggplot2 but still works
axis.line = element_line(colour = "black", size = 1, linetype = "solid"),
plot.margin = margin(t = 40, r = 40, b = 40, l = 40)
)
print(pp)
}
# Inspect the acrophase p-values from the last run (repeated console calls)
print(p_value_acrophase)
print(p_value_acrophase)
print(p_value_acrophase)
# Clear the workspace before the next (moths) analysis session
rm(list = ls())
#Exploring moths data set
#### libraries ----
library("brms")
library("bayestestR")
library("dplyr")
library("ggplot2")
### functions ----
model_brm <- function(myFormula, myData, specifing_experiment, myPrior) {
  # Fit a Gaussian brms model to the rows of `myData` belonging to one
  # experiment and return the fitted brmsfit object.
  # NOTE(review): warmup = 20 / iter = 60 gives only 40 post-warmup draws per
  # chain -- presumably a smoke-test configuration; confirm before inference.
  experiment_data <- myData |> filter(experiment == specifing_experiment)
  brm(
    formula = myFormula,
    data = experiment_data,
    family = gaussian(),
    prior = myPrior,
    chains = 4,
    warmup = 20,
    iter = 60,
    cores = 8,
    backend = "cmdstanr",
    silent = 0
  )
}
model_brm_prior <- function(myFormula, myData, specifing_experiment, myPrior) {
  # Same as model_brm() but samples from the prior only
  # (sample_prior = "only"), i.e. a prior-predictive run for one experiment.
  experiment_data <- myData |> filter(experiment == specifing_experiment)
  brm(
    formula = myFormula,
    data = experiment_data,
    family = gaussian(),
    prior = myPrior,
    chains = 4,
    warmup = 20,
    iter = 60,
    sample_prior = "only",
    cores = 8,
    backend = "cmdstanr",
    silent = 0
  )
}
#### load data ----
# NOTE(review): this read is immediately discarded by the rm() below --
# leftover console history, kept verbatim.
df <- read.csv("data_table_logistic.csv")
rm(list = ls())
#Exploring moths data set
#### libraries ----
library("brms")
library("bayestestR")
library("dplyr")
# Another aborted session start: workspace cleared again before any analysis
rm(list = ls())
#Exploring moths data set
#### libraries ----
library("brms")
library("bayestestR")
library("dplyr")
library("ggplot2")
### functions ----
model_brm <- function(myFormula, myData, specifing_experiment, myPrior) {
  # Fit a Gaussian brms model to the rows of `myData` belonging to one
  # experiment and return the fitted brmsfit object.
  # NOTE(review): warmup = 20 / iter = 60 gives only 40 post-warmup draws per
  # chain -- presumably a smoke-test configuration; confirm before inference.
  experiment_data <- myData |> filter(experiment == specifing_experiment)
  brm(
    formula = myFormula,
    data = experiment_data,
    family = gaussian(),
    prior = myPrior,
    chains = 4,
    warmup = 20,
    iter = 60,
    cores = 8,
    backend = "cmdstanr",
    silent = 0
  )
}
model_brm_prior <- function(myFormula, myData, specifing_experiment, myPrior) {
  # Same as model_brm() but samples from the prior only
  # (sample_prior = "only"), i.e. a prior-predictive run for one experiment.
  experiment_data <- myData |> filter(experiment == specifing_experiment)
  brm(
    formula = myFormula,
    data = experiment_data,
    family = gaussian(),
    prior = myPrior,
    chains = 4,
    warmup = 20,
    iter = 60,
    sample_prior = "only",
    cores = 8,
    backend = "cmdstanr",
    silent = 0
  )
}
#### load data ----
df <- read.csv("data_table_logistic.csv")
# Fixed: `cwd()` is not an R function (the original call errored with
# "could not find function"); getwd() is the base-R way to inspect the
# current working directory.
getwd()
setwd('C://Users//guyze//OneDrive//Documents//moths//moths_codes_tables')
#### load data ----
# NOTE(review): the read above ran before setwd() and presumably failed;
# this second read (after setwd) is the one that populates df.
df <- read.csv("data_table_logistic.csv")
# Rebuild df with analysis-friendly names.
# Fixed: use `<-` for assignment and drop the stray space in
# `df$ Number_of_females`.
df <- data.frame(experiment = factor(df$Experiment),
female_count = df$Number_of_females,
side = df$Side_of_speaker,
y = df$Logistic_count)
#### brms linear ----
# Intercept-only model with a random intercept per female count
formula_01 <- bf(y ~ 1 + (1 | female_count))
# Prior: intercept concentrated near chance level (0.5)
prior_01 <- prior(normal(0.5, 0.1), class = "Intercept")
experiments_names <- unique(df$experiment)
models_list <- list()
# For each experiment: fit the intercept-only model, extract posterior draws
# of the intercept, and compute the posterior probability that the intercept
# is at or above chance (0.5).
for (experiment_temp in experiments_names){
model <- model_brm(formula_01,df,experiment_temp,prior_01)
# Posterior draws of the population-level intercept
params <- insight::get_parameters(model) |> select(b_Intercept)
# P(intercept >= 0.5): fraction of posterior draws at or above chance
probability_of_choosing <- sum(params$b_Intercept>=0.5) / length(params$b_Intercept)
models_list[[experiment_temp]] <- list(
"model" = model,
"parameters" = params,
"summary" = probability_of_choosing
)
}
# NOTE(review): console inspection lines. At this point in the file
# `models_list_tag` is not yet defined (it first appears further down) and
# `df` has no `y_tag` column, so these would have errored / printed NULL
# when originally run.
models_list_tag
df$y_tag
models_list
# Fresh session: clear the workspace and reload the libraries
rm(list = ls())
#Exploring moths data set
#### libraries ----
library("brms")
library("bayestestR")
library("dplyr")
library("ggplot2")
### functions ----
model_brm <- function(myFormula, myData, specifing_experiment, myPrior) {
  # Fit a Gaussian brms model to the subset of `myData` belonging to one
  # experiment and return the fitted brmsfit object
  # (400 post-warmup draws per chain: warmup = 200, iter = 600).
  experiment_data <- myData |> filter(experiment == specifing_experiment)
  brm(
    formula = myFormula,
    data = experiment_data,
    family = gaussian(),
    prior = myPrior,
    chains = 4,
    warmup = 200,
    iter = 600,
    cores = 8,
    backend = "cmdstanr",
    silent = 0
  )
}
model_brm_prior <- function(myFormula, myData, specifing_experiment, myPrior) {
  # Same as model_brm() but samples from the prior only
  # (sample_prior = "only"), i.e. a prior-predictive run for one experiment.
  experiment_data <- myData |> filter(experiment == specifing_experiment)
  brm(
    formula = myFormula,
    data = experiment_data,
    family = gaussian(),
    prior = myPrior,
    chains = 4,
    warmup = 200,
    iter = 600,
    sample_prior = "only",
    cores = 8,
    backend = "cmdstanr",
    silent = 0
  )
}
#### load data ----
df <- read.csv("data_table_logistic.csv")
# Rebuild df with analysis-friendly names.
# Fixed: use `<-` for assignment (not `=` at top level) and drop the stray
# space in `df$ Number_of_females`.
df <- data.frame(experiment = factor(df$Experiment),
female_count = df$Number_of_females,
side = df$Side_of_speaker,
y = df$Logistic_count)
#### brms linear ----
# Intercept-only model with a random intercept per female count
formula_01 <- bf(y ~ 1 + (1 | female_count))
# Prior: intercept concentrated near chance level (0.5)
prior_01 <- prior(normal(0.5, 0.1), class = "Intercept")
experiments_names <- unique(df$experiment)
models_list <- list()
# For each experiment: fit the intercept-only model, extract posterior draws
# of the intercept, and compute the posterior probability that the intercept
# is at or above chance (0.5).
for (experiment_temp in experiments_names){
model <- model_brm(formula_01,df,experiment_temp,prior_01)
# Posterior draws of the population-level intercept
params <- insight::get_parameters(model) |> select(b_Intercept)
# P(intercept >= 0.5): fraction of posterior draws at or above chance
probability_of_choosing <- sum(params$b_Intercept>=0.5) / length(params$b_Intercept)
models_list[[experiment_temp]] <- list(
"model" = model,
"parameters" = params,
"summary" = probability_of_choosing
)
}
View(models_list)
# NOTE(review): `models_list_tag` is referenced here but only created by the
# loop further down -- out-of-order console history; this line errored when
# originally run.
models_list_tag
# Mirrored outcome: probability mass on the other side (1 - y)
df$y_tag <- 1-df$y
# the model
formula_02 <- bf(y_tag ~  1 + (1 | female_count))
models_list_tag <- list()
# Prior-predictive run of the mirrored model (y_tag) for each experiment.
for (experiment_temp in experiments_names){
model_tag <- model_brm_prior(formula_02,df,experiment_temp,prior_01)
# Draws of the intercept under the prior only
params_tag <- insight::get_parameters(model_tag) |> select(b_Intercept)
# Fixed: the denominator previously used `params$b_Intercept` -- a stale
# object left over from an earlier loop -- instead of `params_tag`; the
# ratio is now computed over the same draws it counts.
probability_of_choosing_tag <- sum(params_tag$b_Intercept>=0.5) / length(params_tag$b_Intercept)
models_list_tag[[experiment_temp]] <- list(
"model" = model_tag,
"parameters" = params_tag,
"summary" = probability_of_choosing_tag
)
}
# Re-fit the posterior models per experiment and store the results.
for (experiment_temp in experiments_names){
model <- model_brm(formula_01,df,experiment_temp,prior_01)
# Posterior draws of the population-level intercept
params <- insight::get_parameters(model) |> select(b_Intercept)
# Fixed: this computation was commented out, so "summary" silently stored a
# stale `probability_of_choosing` carried over from a previous loop run.
probability_of_choosing <- sum(params$b_Intercept>=0.5) / length(params$b_Intercept)
models_list[[experiment_temp]] <- list(
"model" = model,
"parameters" = params,
"summary" = probability_of_choosing
)
}
# Console inspection of the final results (auto-printed)
models_list
df
