Save NIDM-Results packs to NiMARE dataset

In [ ]:
import json
from glob import glob
import nibabel as nib
import pandas as pd
import numpy as np
from scipy import ndimage
from os.path import basename, join, isfile, dirname

import nimare
/home/james/.conda/envs/nimare_dev/lib/python3.7/site-packages/duecredit-0.6.4-py3.7.egg/duecredit/utils.py:32: DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5
  and platform.linux_distribution()[0] == 'debian' \
/home/james/.conda/envs/nimare_dev/lib/python3.7/site-packages/duecredit-0.6.4-py3.7.egg/duecredit/io.py:18: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
  from collections import defaultdict, Iterator
/home/james/.conda/envs/nimare_dev/lib/python3.7/site-packages/nipype-1.1.7-py3.7.egg/nipype/interfaces/base/traits_extension.py:28: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
  from collections import Sequence
/home/james/.conda/envs/nimare_dev/lib/python3.7/site-packages/nltk/decorators.py:68: DeprecationWarning: `formatargspec` is deprecated since Python 3.5. Use `signature` and the `Signature` object directly
  regargs, varargs, varkwargs, defaults, formatvalue=lambda value: ""
/home/james/.conda/envs/nimare_dev/lib/python3.7/importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__ or __package__, falling back on __name__ and __path__
  return f(*args, **kwds)
/home/james/.conda/envs/nimare_dev/lib/python3.7/site-packages/fuzzywuzzy/fuzz.py:11: UserWarning: Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning
  warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')
In [ ]:
def _local_max(data, affine, min_distance):
    """Find all local maxima of the array, separated by at least min_distance.
    Adapted from https://stackoverflow.com/a/22631583/2589328
    Parameters
    ----------
    data : array_like
        3D array of with masked values for cluster.
    min_distance : :obj:`int`
        Minimum distance between local maxima in ``data``, in terms of mm.
    Returns
    -------
    ijk : :obj:`numpy.ndarray`
        (n_foci, 3) array of local maxima indices for cluster.
    vals : :obj:`numpy.ndarray`
        (n_foci,) array of values from data at ijk.
    """
    # Initial identification of subpeaks with minimal minimum distance
    data_max = ndimage.filters.maximum_filter(data, 3)
    maxima = (data == data_max)
    data_min = ndimage.filters.minimum_filter(data, 3)
    diff = ((data_max - data_min) > 0)
    maxima[diff == 0] = 0

    labeled, n_subpeaks = ndimage.label(maxima)
    ijk = np.array(ndimage.center_of_mass(data, labeled,
                                          range(1, n_subpeaks + 1)))
    ijk = np.round(ijk).astype(int)

    vals = np.apply_along_axis(arr=ijk, axis=1, func1d=_get_val,
                               input_arr=data)

    # Sort subpeaks in cluster in descending order of stat value
    order = (-vals).argsort()
    vals = vals[order]
    ijk = ijk[order, :]
    xyz = nib.affines.apply_affine(affine, ijk)  # Convert to xyz in mm

    # Reduce list of subpeaks based on distance
    keep_idx = np.ones(xyz.shape[0]).astype(bool)
    for i in range(xyz.shape[0]):
        for j in range(i + 1, xyz.shape[0]):
            if keep_idx[i] == 1:
                dist = np.linalg.norm(xyz[i, :] - xyz[j, :])
                keep_idx[j] = dist > min_distance
    ijk = ijk[keep_idx, :]
    vals = vals[keep_idx]
    return ijk, vals


def _get_val(row, input_arr):
    """Small function for extracting values from array based on index.
    """
    i, j, k = row
    return input_arr[i, j, k]
In [ ]:
# Output paths: f1 is the peak-only dataset JSON inside NiMARE's installed
# test-data directory; f2 (peaks + subpeaks) is written to the current
# working directory.
f1 = join(dirname(nimare.__file__), 'tests', 'data', 'nidm_pain_dset.json')
f2 = 'nidm_pain_dset_with_subpeaks.json'
In [ ]:
# Build the peak-only NiMARE dataset dict: one contrast per NIDM pack, with
# image paths, sample size, and a single peak coordinate per positive cluster.
ddict = {}
# NOTE: hardcoded absolute local path; adjust for your machine.
folders = sorted(glob('/Users/tsalo/Downloads/nidm-pain-results/pain_*.nidm'))
for folder in folders:
    name = basename(folder)
    ddict[name] = {}
    ddict[name]['contrasts'] = {}
    ddict[name]['contrasts']['1'] = {}
    ddict[name]['contrasts']['1']['coords'] = {}
    ddict[name]['contrasts']['1']['coords']['space'] = 'MNI'
    ddict[name]['contrasts']['1']['images'] = {}
    ddict[name]['contrasts']['1']['images']['space'] = 'MNI_2mm'
    # Locate each statistical image by glob pattern. The 'con' pattern also
    # matches the standard-error maps, so those are filtered out explicitly.
    patterns = {'con': 'Contrast*.nii.gz',
                'se': 'ContrastStandardError*.nii.gz',
                'z': 'ZStatistic*.nii.gz',
                't': 'TStatistic*.nii.gz'}
    for img_type, pattern in patterns.items():
        files = glob(join(folder, pattern))
        if img_type == 'con':
            files = [f for f in files if 'StandardError' not in basename(f)]
        ddict[name]['contrasts']['1']['images'][img_type] = (
            sorted(files)[0] if files else None)
    # sample size = number of rows in the design matrix
    f = join(folder, 'DesignMatrix.csv')
    if isfile(f):
        df = pd.read_csv(f, header=None)
        n = [df.shape[0]]
    else:
        n = None
    ddict[name]['contrasts']['1']['sample_sizes'] = n
    # foci: the max-statistic voxel of each positive cluster in the
    # thresholded (excursion set) map
    files = glob(join(folder, 'ExcursionSet*.nii.gz'))
    f = sorted(files)[0]
    img = nib.load(f)
    # get_data() is deprecated (removed in nibabel 5); use get_fdata()
    data = np.nan_to_num(img.get_fdata())
    # positive clusters, 26-connectivity
    binarized = (data > 0).astype(int)
    # ndimage.measurements is a deprecated namespace; call label directly
    labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]
    clust_ids = sorted(list(np.unique(labeled)[1:]))
    ijk = np.hstack(
        [np.where(data * (labeled == c) == np.max(data * (labeled == c)))
         for c in clust_ids])
    ijk = ijk.T
    xyz = nib.affines.apply_affine(img.affine, ijk)  # voxel indices -> mm
    ddict[name]['contrasts']['1']['coords']['x'] = list(xyz[:, 0])
    ddict[name]['contrasts']['1']['coords']['y'] = list(xyz[:, 1])
    ddict[name]['contrasts']['1']['coords']['z'] = list(xyz[:, 2])

with open(f1, 'w') as fo:
    json.dump(ddict, fo, sort_keys=True, indent=4)
In [ ]:
# Build the peak-plus-subpeaks NiMARE dataset dict: same layout as the
# peak-only dataset, but each cluster contributes its peak and up to three
# subpeaks (separated by >= 8 mm) found via _local_max.
ddict = {}
# NOTE: hardcoded absolute local path; adjust for your machine.
folders = sorted(glob('/Users/tsalo/Downloads/nidm-pain-results/pain_*.nidm'))
for folder in folders:
    name = basename(folder)
    ddict[name] = {}
    ddict[name]['contrasts'] = {}
    ddict[name]['contrasts']['1'] = {}
    ddict[name]['contrasts']['1']['coords'] = {}
    ddict[name]['contrasts']['1']['coords']['space'] = 'MNI'
    ddict[name]['contrasts']['1']['images'] = {}
    ddict[name]['contrasts']['1']['images']['space'] = 'MNI_2mm'
    # Locate each statistical image by glob pattern. The 'con' pattern also
    # matches the standard-error maps, so those are filtered out explicitly.
    patterns = {'con': 'Contrast*.nii.gz',
                'se': 'ContrastStandardError*.nii.gz',
                'z': 'ZStatistic*.nii.gz',
                't': 'TStatistic*.nii.gz'}
    for img_type, pattern in patterns.items():
        files = glob(join(folder, pattern))
        if img_type == 'con':
            files = [f for f in files if 'StandardError' not in basename(f)]
        ddict[name]['contrasts']['1']['images'][img_type] = (
            sorted(files)[0] if files else None)
    # sample size = number of rows in the design matrix
    f = join(folder, 'DesignMatrix.csv')
    if isfile(f):
        df = pd.read_csv(f, header=None)
        n = [df.shape[0]]
    else:
        n = None
    ddict[name]['contrasts']['1']['sample_sizes'] = n
    # foci: peak and subpeaks from each positive cluster
    files = glob(join(folder, 'ExcursionSet*.nii.gz'))
    f = sorted(files)[0]
    img = nib.load(f)
    # get_data() is deprecated (removed in nibabel 5); use get_fdata()
    data = np.nan_to_num(img.get_fdata())
    # positive clusters, 26-connectivity
    binarized = (data > 0).astype(int)
    # ndimage.measurements is a deprecated namespace; call label directly
    labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]
    clust_ids = sorted(list(np.unique(labeled)[1:]))

    # Sort clusters in descending order of peak statistic value
    peak_vals = np.array([np.max(data * (labeled == c)) for c in clust_ids])
    clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]

    ijk = []
    for c_val in clust_ids:
        cluster_mask = labeled == c_val
        masked_data = data * cluster_mask

        # Get peaks, subpeaks and associated statistics
        subpeak_ijk, subpeak_vals = _local_max(masked_data, img.affine,
                                               min_distance=8)

        # Only report peak and, at most, top 3 subpeaks.
        n_subpeaks = min(len(subpeak_vals), 4)
        ijk.append(subpeak_ijk[:n_subpeaks, :])
    ijk = np.vstack(ijk)
    xyz = nib.affines.apply_affine(img.affine, ijk)  # voxel indices -> mm
    ddict[name]['contrasts']['1']['coords']['x'] = list(xyz[:, 0])
    ddict[name]['contrasts']['1']['coords']['y'] = list(xyz[:, 1])
    ddict[name]['contrasts']['1']['coords']['z'] = list(xyz[:, 2])


with open(f2, 'w') as fo:
    json.dump(ddict, fo, sort_keys=True, indent=4)

Home | github | NiMARE