Skip to content

Commit 9f3211c

Browse files
author
Nicolas Legrand
authored
Fix and document the use of preprocessing (#63)
* preprocessing: check for the number of valid trials before fitting SDT and metacognitive models. * tutorial on how to use preprocessing. Remove the repeated measures notebook. * Update the notebooks. Remove png and use graphviz instead.
1 parent dd4e896 commit 9f3211c

10 files changed

+3576
-3392
lines changed

.github/workflows/docs.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ jobs:
2424

2525
- name: Build
2626
run: |
27+
sudo apt-get install graphviz
2728
pip install psychopy==2020.1.2
2829
pip install -r requirements.txt
2930
pip install -r requirements-docs.txt

cardioception/reports.py

Lines changed: 61 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,9 @@ def preprocessing(results: Union[PathLike, pd.DataFrame]) -> pd.DataFrame:
3333
estimating the metacognitive sensitivity meta-d' (`bayesian_dprime`,
3434
`bayesian_criterion`, `bayesian_meta_d`, `bayesian_m_ratio`). The dprime and
3535
criterion can vary between the two methods. It is recommended to use the estimates
36-
consistently.
36+
consistently. Before the estimation of SDT and metacognitive metrics, the function
37+
ensures that at least 5 valid trials of each signal are present; otherwise it returns
38+
`None`.
3739
3840
When using this function for analysing results from the Heart Rate Discrimination
3941
task, the following packages should be credited: Systole [1]_, metadpy [2]_ and
@@ -53,8 +55,7 @@ def preprocessing(results: Union[PathLike, pd.DataFrame]) -> pd.DataFrame:
5355
Notes
5456
-----
5557
This function will require [PyMC](https://github.com/pymc-devs/pymc) (>= 5.0) and
56-
[metadpy](https://github.com/embodied-computation-group/metadpy) (>=0.1.0) to work
57-
correctly.
58+
[metadpy](https://github.com/embodied-computation-group/metadpy) (>=0.1.0).
5859
5960
References
6061
----------
@@ -101,54 +102,70 @@ def preprocessing(results: Union[PathLike, pd.DataFrame]) -> pd.DataFrame:
101102
)
102103
this_modality["Responses"] = this_modality.Decision == "More"
103104

104-
hit, miss, fa, cr = this_modality.scores()
105-
hr, far = sdt.rates(hits=hit, misses=miss, fas=fa, crs=cr)
106-
d, c = sdt.dprime(hit_rate=hr, fa_rate=far), sdt.criterion(
107-
hit_rate=hr, fa_rate=far
108-
)
105+
# check that both signals have at least 5 valid trials each
106+
if (this_modality["Stimuli"].sum() > 5) & (
107+
(~this_modality["Stimuli"]).sum() > 5
108+
):
109+
110+
hit, miss, fa, cr = this_modality.scores()
111+
hr, far = sdt.rates(hits=hit, misses=miss, fas=fa, crs=cr)
112+
d, c = sdt.dprime(hit_rate=hr, fa_rate=far), sdt.criterion(
113+
hit_rate=hr, fa_rate=far
114+
)
115+
else:
116+
d, c, = (
117+
None,
118+
None,
119+
)
109120

110121
# metacognitive sensitivity
111122
# -------------------------
123+
(
124+
bayesian_dprime,
125+
bayesian_criterion,
126+
bayesian_meta_d,
127+
bayesian_m_ratio,
128+
) = (None, None, None, None)
129+
112130
this_modality = this_modality[
113131
~this_modality.Confidence.isna()
114-
] # Drop trials with NaN in confidence rating
132+
].copy() # Drop trials with NaN in confidence rating
115133
this_modality.loc[:, "Accuracy"] = (
116-
this_modality["Stimuli"] & this_modality["Responses"]
117-
) | (~this_modality["Stimuli"] & ~this_modality["Responses"])
118-
119-
try:
120-
new_ratings, _ = discreteRatings(
121-
this_modality.Confidence.to_numpy(), verbose=False
122-
)
123-
this_modality.loc[:, "discrete_confidence"] = new_ratings
124-
125-
metad = bayesian.hmetad(
126-
data=this_modality,
127-
stimuli="Stimuli",
128-
accuracy="Accuracy",
129-
confidence="discrete_confidence",
130-
nRatings=4,
131-
output="dataframe",
132-
)
133-
bayesian_dprime = metad["d"].values[0]
134-
bayesian_criterion = metad["c"].values[0]
135-
bayesian_meta_d = metad["meta_d"].values[0]
136-
bayesian_m_ratio = metad["m_ratio"].values[0]
137-
138-
except ValueError:
139-
print(
140-
(
141-
f"Cannot discretize ratings for modaliti: {modality}. "
142-
"The metacognitive efficiency will not be reported."
134+
(this_modality["Stimuli"] & this_modality["Responses"])
135+
| (~this_modality["Stimuli"] & ~this_modality["Responses"])
136+
).copy()
137+
138+
# check that both signals have at least 5 valid trials each
139+
if (this_modality["Stimuli"].sum() > 5) & (
140+
(~this_modality["Stimuli"]).sum() > 5
141+
):
142+
143+
try:
144+
new_ratings, _ = discreteRatings(
145+
this_modality.Confidence.to_numpy(), verbose=False
146+
)
147+
this_modality.loc[:, "discrete_confidence"] = new_ratings
148+
149+
metad = bayesian.hmetad(
150+
data=this_modality,
151+
stimuli="Stimuli",
152+
accuracy="Accuracy",
153+
confidence="discrete_confidence",
154+
nRatings=4,
155+
output="dataframe",
156+
)
157+
bayesian_dprime = metad["d"].values[0]
158+
bayesian_criterion = metad["c"].values[0]
159+
bayesian_meta_d = metad["meta_d"].values[0]
160+
bayesian_m_ratio = metad["m_ratio"].values[0]
161+
162+
except ValueError:
163+
print(
164+
(
165+
f"Cannot discretize ratings for modality: {modality}. "
166+
"The metacognitive efficiency will not be reported."
167+
)
143168
)
144-
)
145-
146-
(
147-
bayesian_dprime,
148-
bayesian_criterion,
149-
bayesian_meta_d,
150-
bayesian_m_ratio,
151-
) = (None, None, None, None)
152169

153170
# bayesian psychophysics
154171
# ----------------------

0 commit comments

Comments
 (0)