diff --git a/+types/+core/ElectrodeGroup.m b/+types/+core/ElectrodeGroup.m index 373f54e0f..3539d6d84 100644 --- a/+types/+core/ElectrodeGroup.m +++ b/+types/+core/ElectrodeGroup.m @@ -1,5 +1,5 @@ classdef ElectrodeGroup < types.core.NWBContainer & types.untyped.GroupClass -% ELECTRODEGROUP - A physical grouping of electrodes, e.g. a shank of an array. +% ELECTRODEGROUP - A physical grouping of electrodes, e.g., a shank of an array. An electrode group is typically used to describe electrodes that are physically connected on a single device and are often (but not always) used together for analysis, such as for spike sorting. Note that this is descriptive metadata; electrodes from different groups can still be spike-sorted together if needed. % % Required Properties: % description, device, location diff --git a/+types/+core/ElectrodesTable.m b/+types/+core/ElectrodesTable.m index e84071f1e..085625816 100644 --- a/+types/+core/ElectrodesTable.m +++ b/+types/+core/ElectrodesTable.m @@ -16,12 +16,12 @@ group_name; % (VectorData) Name of the ElectrodeGroup this electrode is a part of. imp; % (VectorData) Impedance of the channel, in ohms. reference; % (VectorData) Description of the reference electrode and/or reference scheme used for this electrode, e.g., "stainless steel skull screw" or "online common average referencing". - rel_x; % (VectorData) x coordinate in electrode group - rel_y; % (VectorData) y coordinate in electrode group - rel_z; % (VectorData) z coordinate in electrode group - x; % (VectorData) x coordinate of the channel location in the brain (+x is posterior). - y; % (VectorData) y coordinate of the channel location in the brain (+y is inferior). - z; % (VectorData) z coordinate of the channel location in the brain (+z is right). + rel_x; % (VectorData) x coordinate in electrode group. Units should be specified in microns. + rel_y; % (VectorData) y coordinate in electrode group. Units should be specified in microns. 
+ rel_z; % (VectorData) z coordinate in electrode group. Units should be specified in microns. + x; % (VectorData) x coordinate of the channel location in the brain (+x is posterior). Units should be specified in microns. + y; % (VectorData) y coordinate of the channel location in the brain (+y is inferior). Units should be specified in microns. + z; % (VectorData) z coordinate of the channel location in the brain (+z is right). Units should be specified in microns. end methods @@ -50,21 +50,23 @@ % % - location (VectorData) - Location of the electrode (channel). Specify the area, layer, comments on estimation of area/layer, stereotaxic coordinates if in vivo, etc. Use standard atlas names for anatomical regions when possible. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - reference (VectorData) - Description of the reference electrode and/or reference scheme used for this electrode, e.g., "stainless steel skull screw" or "online common average referencing". % - % - rel_x (VectorData) - x coordinate in electrode group + % - rel_x (VectorData) - x coordinate in electrode group. Units should be specified in microns. % - % - rel_y (VectorData) - y coordinate in electrode group + % - rel_y (VectorData) - y coordinate in electrode group. Units should be specified in microns. % - % - rel_z (VectorData) - z coordinate in electrode group + % - rel_z (VectorData) - z coordinate in electrode group. Units should be specified in microns. % % - vectordata (VectorData) - Vector columns, including index columns, of this dynamic table. % - % - x (VectorData) - x coordinate of the channel location in the brain (+x is posterior). 
+ % - x (VectorData) - x coordinate of the channel location in the brain (+x is posterior). Units should be specified in microns. % - % - y (VectorData) - y coordinate of the channel location in the brain (+y is inferior). + % - y (VectorData) - y coordinate of the channel location in the brain (+y is inferior). Units should be specified in microns. % - % - z (VectorData) - z coordinate of the channel location in the brain (+z is right). + % - z (VectorData) - z coordinate of the channel location in the brain (+z is right). Units should be specified in microns. % % Output Arguments: % - electrodesTable (types.core.ElectrodesTable) - A ElectrodesTable object diff --git a/+types/+core/ExperimentalConditionsTable.m b/+types/+core/ExperimentalConditionsTable.m index c81121969..a5db611f7 100644 --- a/+types/+core/ExperimentalConditionsTable.m +++ b/+types/+core/ExperimentalConditionsTable.m @@ -27,6 +27,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - repetitions (DynamicTableRegion) - A reference to one or more rows in the RepetitionsTable table. % % - repetitions_index (VectorIndex) - Index dataset for the repetitions column. diff --git a/+types/+core/FrequencyBandsTable.m b/+types/+core/FrequencyBandsTable.m index e1fea696f..696e2907d 100644 --- a/+types/+core/FrequencyBandsTable.m +++ b/+types/+core/FrequencyBandsTable.m @@ -40,6 +40,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. 
% + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - vectordata (VectorData) - Vector columns, including index columns, of this dynamic table. % % Output Arguments: diff --git a/+types/+core/IntracellularElectrodesTable.m b/+types/+core/IntracellularElectrodesTable.m index 33e872a74..1c5922323 100644 --- a/+types/+core/IntracellularElectrodesTable.m +++ b/+types/+core/IntracellularElectrodesTable.m @@ -26,6 +26,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - vectordata (VectorData) - Vector columns, including index columns, of this dynamic table. % % Output Arguments: diff --git a/+types/+core/IntracellularRecordingsTable.m b/+types/+core/IntracellularRecordingsTable.m index 94b52edb3..2867e3990 100644 --- a/+types/+core/IntracellularRecordingsTable.m +++ b/+types/+core/IntracellularRecordingsTable.m @@ -32,6 +32,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. 
e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - responses (IntracellularResponsesTable) - Table for storing intracellular response related metadata. % % - stimuli (IntracellularStimuliTable) - Table for storing intracellular stimulus related metadata. diff --git a/+types/+core/IntracellularResponsesTable.m b/+types/+core/IntracellularResponsesTable.m index b05bacddf..fdf01810b 100644 --- a/+types/+core/IntracellularResponsesTable.m +++ b/+types/+core/IntracellularResponsesTable.m @@ -24,6 +24,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - response (TimeSeriesReferenceVectorData) - Column storing the reference to the recorded response for the recording (rows) % % - vectordata (VectorData) - Vector columns, including index columns, of this dynamic table. diff --git a/+types/+core/IntracellularStimuliTable.m b/+types/+core/IntracellularStimuliTable.m index a2e666eb4..960da276b 100644 --- a/+types/+core/IntracellularStimuliTable.m +++ b/+types/+core/IntracellularStimuliTable.m @@ -28,6 +28,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. 
e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - stimulus (TimeSeriesReferenceVectorData) - Column storing the reference to the recorded stimulus for the recording (rows). % % - stimulus_template (TimeSeriesReferenceVectorData) - Column storing the reference to the stimulus template for the recording (rows). diff --git a/+types/+core/NWBFile.m b/+types/+core/NWBFile.m index 0d3675401..b76d55ed6 100644 --- a/+types/+core/NWBFile.m +++ b/+types/+core/NWBFile.m @@ -7,7 +7,7 @@ % READONLY PROPERTIES properties(SetAccess = protected) - nwb_version = "2.9.0"; % (char) File version string. Use semantic versioning, e.g. 1.2.1. This will be the name of the format with trailing major, minor and patch numbers. + nwb_version = "2.10.0-alpha"; % (char) File version string. Use semantic versioning, e.g. 1.2.1. This will be the name of the format with trailing major, minor and patch numbers. end % REQUIRED PROPERTIES properties @@ -27,6 +27,7 @@ general_devices_models; % (DeviceModel) Data acquisition device models. general_experiment_description; % (char) General description of the experiment. general_experimenter; % (char) Name of person(s) who performed the experiment. Can also specify roles of different people involved. + general_external_resources; % (HERD) This is the HERD structure for this specific NWBFile, storing the mapped external resources. general_extracellular_ephys; % (ElectrodeGroup) Physical group of electrodes. general_extracellular_ephys_electrodes; % (ElectrodesTable) A table of all electrodes (i.e. channels) used for recording. Changed in NWB 2.9.0 to use the newly added ElectrodesTable neurodata type instead of a DynamicTable with added columns. general_institution; % (char) Institution(s) where experiment was performed. @@ -94,6 +95,8 @@ % % - general_experimenter (char) - Name of person(s) who performed the experiment. 
Can also specify roles of different people involved. % + % - general_external_resources (HERD) - This is the HERD structure for this specific NWBFile, storing the mapped external resources. + % % - general_extracellular_ephys (ElectrodeGroup) - Physical group of electrodes. % % - general_extracellular_ephys_electrodes (ElectrodesTable) - A table of all electrodes (i.e. channels) used for recording. Changed in NWB 2.9.0 to use the newly added ElectrodesTable neurodata type instead of a DynamicTable with added columns. @@ -179,7 +182,7 @@ % Output Arguments: % - nWBFile (types.core.NWBFile) - A NWBFile object - varargin = [{'nwb_version' '2.9.0'} varargin]; + varargin = [{'nwb_version' '2.10.0-alpha'} varargin]; obj = obj@types.core.NWBContainer(varargin{:}); @@ -196,6 +199,7 @@ addParameter(p, 'general_devices_models',types.untyped.Set()); addParameter(p, 'general_experiment_description',[]); addParameter(p, 'general_experimenter',[]); + addParameter(p, 'general_external_resources',[]); addParameter(p, 'general_extracellular_ephys',types.untyped.Set()); addParameter(p, 'general_extracellular_ephys_electrodes',[]); addParameter(p, 'general_institution',[]); @@ -248,6 +252,7 @@ obj.general_devices_models = p.Results.general_devices_models; obj.general_experiment_description = p.Results.general_experiment_description; obj.general_experimenter = p.Results.general_experimenter; + obj.general_external_resources = p.Results.general_external_resources; obj.general_extracellular_ephys = p.Results.general_extracellular_ephys; obj.general_extracellular_ephys_electrodes = p.Results.general_extracellular_ephys_electrodes; obj.general_institution = p.Results.general_institution; @@ -326,6 +331,9 @@ function set.general_experimenter(obj, val) obj.general_experimenter = obj.validate_general_experimenter(val); end + function set.general_external_resources(obj, val) + obj.general_external_resources = obj.validate_general_external_resources(val); + end function 
set.general_extracellular_ephys(obj, val) obj.general_extracellular_ephys = obj.validate_general_extracellular_ephys(val); end @@ -496,6 +504,9 @@ function postset_general_source_script_file_name(obj) val = types.util.checkDtype('general_experimenter', 'char', val); types.util.validateShape('general_experimenter', {[Inf]}, val) end + function val = validate_general_external_resources(obj, val) + val = types.util.checkDtype('general_external_resources', 'types.hdmf_common.HERD', val); + end function val = validate_general_extracellular_ephys(obj, val) namedprops = struct(); constrained = {'types.core.ElectrodeGroup'}; @@ -702,6 +713,10 @@ function postset_general_source_script_file_name(obj) end end io.writeGroup(fid, [fullpath '/general']); + if ~isempty(obj.general_external_resources) + refs = obj.general_external_resources.export(fid, [fullpath '/general/external_resources'], refs); + end + io.writeGroup(fid, [fullpath '/general']); if ~isempty(obj.general_extracellular_ephys) refs = obj.general_extracellular_ephys.export(fid, [fullpath '/general/extracellular_ephys'], refs); end diff --git a/+types/+core/PlaneSegmentation.m b/+types/+core/PlaneSegmentation.m index 92e7d8e08..ceb0c1d65 100644 --- a/+types/+core/PlaneSegmentation.m +++ b/+types/+core/PlaneSegmentation.m @@ -39,6 +39,8 @@ % % - imaging_plane (ImagingPlane) - Link to ImagingPlane object from which this data was generated. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - pixel_mask (VectorData) - Pixel masks for each ROI: a list of indices and weights for the ROI. Pixel masks are concatenated and parsing of this dataset is maintained by the PlaneSegmentation. 
At least one of `image_mask`, `pixel_mask`, or `voxel_mask` is required. % % - pixel_mask_index (VectorIndex) - Index into pixel_mask. diff --git a/+types/+core/RepetitionsTable.m b/+types/+core/RepetitionsTable.m index 9d35078e9..3d426c025 100644 --- a/+types/+core/RepetitionsTable.m +++ b/+types/+core/RepetitionsTable.m @@ -27,6 +27,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - sequential_recordings (DynamicTableRegion) - A reference to one or more rows in the SequentialRecordingsTable table. % % - sequential_recordings_index (VectorIndex) - Index dataset for the sequential_recordings column. diff --git a/+types/+core/SequentialRecordingsTable.m b/+types/+core/SequentialRecordingsTable.m index c7531a50b..3a0586c17 100644 --- a/+types/+core/SequentialRecordingsTable.m +++ b/+types/+core/SequentialRecordingsTable.m @@ -28,6 +28,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - simultaneous_recordings (DynamicTableRegion) - A reference to one or more rows in the SimultaneousRecordingsTable table. % % - simultaneous_recordings_index (VectorIndex) - Index dataset for the simultaneous_recordings column. 
diff --git a/+types/+core/SimultaneousRecordingsTable.m b/+types/+core/SimultaneousRecordingsTable.m index 9c3a16d28..72bc449fa 100644 --- a/+types/+core/SimultaneousRecordingsTable.m +++ b/+types/+core/SimultaneousRecordingsTable.m @@ -27,6 +27,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - recordings (DynamicTableRegion) - A reference to one or more rows in the IntracellularRecordingsTable table. % % - recordings_index (VectorIndex) - Index dataset for the recordings column. diff --git a/+types/+core/Subject.m b/+types/+core/Subject.m index a1ced2920..c93a8f2b5 100644 --- a/+types/+core/Subject.m +++ b/+types/+core/Subject.m @@ -7,7 +7,7 @@ % OPTIONAL PROPERTIES properties - age; % (char) Age of subject. Can be supplied instead of 'date_of_birth'. + age; % (char) Age of subject. Can be supplied instead of 'date_of_birth'. The ISO 8601 Duration format is recommended, e.g., 'P90D' for 90 days old. If the precise age is unknown, an age range can be given by '[lower bound]/[upper bound]' e.g. 'P10D/P20D' would mean that the age is in between 10 and 20 days. If only the lower bound is known, then including only the slash after that lower bound can be used to indicate a missing bound. For instance, 'P90Y/' would indicate that the age is 90 years or older. age_reference = "birth"; % (char) Age is with reference to this event. Can be 'birth' or 'gestational'. If reference is omitted, 'birth' is implied. date_of_birth; % (datetime) Date of birth of subject. Can be supplied instead of 'age'. 
description; % (char) Description of subject and where subject came from (e.g., breeder, if animal). @@ -29,7 +29,7 @@ % subject = types.core.SUBJECT(Name, Value) creates a Subject object where one or more property values are specified using name-value pairs. % % Input Arguments (Name-Value Arguments): - % - age (char) - Age of subject. Can be supplied instead of 'date_of_birth'. + % - age (char) - Age of subject. Can be supplied instead of 'date_of_birth'. The ISO 8601 Duration format is recommended, e.g., 'P90D' for 90 days old. If the precise age is unknown, an age range can be given by '[lower bound]/[upper bound]' e.g. 'P10D/P20D' would mean that the age is in between 10 and 20 days. If only the lower bound is known, then including only the slash after that lower bound can be used to indicate a missing bound. For instance, 'P90Y/' would indicate that the age is 90 years or older. % % - age_reference (char) - Age is with reference to this event. Can be 'birth' or 'gestational'. If reference is omitted, 'birth' is implied. % diff --git a/+types/+core/SweepTable.m b/+types/+core/SweepTable.m index 6decf0717..d20f89c61 100644 --- a/+types/+core/SweepTable.m +++ b/+types/+core/SweepTable.m @@ -28,6 +28,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - series (VectorData) - The PatchClampSeries with the sweep number in that row. % % - series_index (VectorIndex) - Index for series. 
diff --git a/+types/+core/TimeIntervals.m b/+types/+core/TimeIntervals.m index 0d622faf5..d096bd3ac 100644 --- a/+types/+core/TimeIntervals.m +++ b/+types/+core/TimeIntervals.m @@ -34,6 +34,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - start_time (VectorData) - Start time of epoch, in seconds. % % - stop_time (VectorData) - Stop time of epoch, in seconds. diff --git a/+types/+core/Units.m b/+types/+core/Units.m index 2bc4b3733..68f0b1ed7 100644 --- a/+types/+core/Units.m +++ b/+types/+core/Units.m @@ -43,6 +43,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - obs_intervals (VectorData) - Observation intervals for each unit. % % - obs_intervals_index (VectorIndex) - Index into the obs_intervals dataset. 
diff --git a/+types/+core/Version.m b/+types/+core/Version.m index 35f66bc8a..d168ca9e2 100644 --- a/+types/+core/Version.m +++ b/+types/+core/Version.m @@ -1,3 +1,3 @@ function version = Version() - version = '2.9.0'; + version = '2.10.0-alpha'; end \ No newline at end of file diff --git a/+types/+hdmf_common/AlignedDynamicTable.m b/+types/+hdmf_common/AlignedDynamicTable.m index 7907b4d91..4af503229 100644 --- a/+types/+hdmf_common/AlignedDynamicTable.m +++ b/+types/+hdmf_common/AlignedDynamicTable.m @@ -37,6 +37,8 @@ % % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % % - vectordata (VectorData) - Vector columns, including index columns, of this dynamic table. % % Output Arguments: diff --git a/+types/+hdmf_common/DynamicTableRegion.m b/+types/+hdmf_common/DynamicTableRegion.m index 146e9a700..82da66ebf 100644 --- a/+types/+hdmf_common/DynamicTableRegion.m +++ b/+types/+hdmf_common/DynamicTableRegion.m @@ -20,7 +20,7 @@ % dynamicTableRegion = types.hdmf_common.DYNAMICTABLEREGION(Name, Value) creates a DynamicTableRegion object where one or more property values are specified using name-value pairs. % % Input Arguments (Name-Value Arguments): - % - data (int8) - Data property for dataset class (DynamicTableRegion) + % - data (int32) - Data property for dataset class (DynamicTableRegion) % % - description (char) - Description of what this table region points to. 
% @@ -58,7 +58,7 @@ %% VALIDATORS function val = validate_data(obj, val) - val = types.util.checkDtype('data', 'int8', val); + val = types.util.checkDtype('data', 'int32', val); types.util.validateShape('data', {[Inf]}, val) end function val = validate_description(obj, val) diff --git a/+types/+hdmf_common/ElementIdentifiers.m b/+types/+hdmf_common/ElementIdentifiers.m index 1f9e208c9..728dee40a 100644 --- a/+types/+hdmf_common/ElementIdentifiers.m +++ b/+types/+hdmf_common/ElementIdentifiers.m @@ -16,7 +16,7 @@ % elementIdentifiers = types.hdmf_common.ELEMENTIDENTIFIERS(Name, Value) creates a ElementIdentifiers object where one or more property values are specified using name-value pairs. % % Input Arguments (Name-Value Arguments): - % - data (int8) - Data property for dataset class (ElementIdentifiers) + % - data (int32) - Data property for dataset class (ElementIdentifiers) % % Output Arguments: % - elementIdentifiers (types.hdmf_common.ElementIdentifiers) - A ElementIdentifiers object @@ -42,7 +42,7 @@ %% VALIDATORS function val = validate_data(obj, val) - val = types.util.checkDtype('data', 'int8', val); + val = types.util.checkDtype('data', 'int32', val); types.util.validateShape('data', {[Inf]}, val) end %% EXPORT diff --git a/+types/+hdmf_common/HERD.m b/+types/+hdmf_common/HERD.m new file mode 100644 index 000000000..74fc7cf3e --- /dev/null +++ b/+types/+hdmf_common/HERD.m @@ -0,0 +1,201 @@ +classdef HERD < types.hdmf_common.Container & types.untyped.GroupClass +% HERD - HDMF External Resources Data Structure. A set of six tables for tracking external resource references in a file or across multiple files. +% +% Required Properties: +% entities, entity_keys, files, keys, object_keys, objects + + +% REQUIRED PROPERTIES +properties + entities; % REQUIRED (Data) A table for mapping user terms (i.e., keys) to resource entities. + entity_keys; % REQUIRED (Data) A table for identifying which keys use which entity. 
+ files; % REQUIRED (Data) A table for storing object ids of files used in external resources. + keys; % REQUIRED (Data) A table for storing user terms that are used to refer to external resources. + object_keys; % REQUIRED (Data) A table for identifying which objects use which keys. + objects; % REQUIRED (Data) A table for identifying which objects in a file contain references to external resources. +end + +methods + function obj = HERD(varargin) + % HERD - Constructor for HERD + % + % Syntax: + % hERD = types.hdmf_common.HERD() creates a HERD object with unset property values. + % + % hERD = types.hdmf_common.HERD(Name, Value) creates a HERD object where one or more property values are specified using name-value pairs. + % + % Input Arguments (Name-Value Arguments): + % - entities (Data) - A table for mapping user terms (i.e., keys) to resource entities. + % + % - entity_keys (Data) - A table for identifying which keys use which entity. + % + % - files (Data) - A table for storing object ids of files used in external resources. + % + % - keys (Data) - A table for storing user terms that are used to refer to external resources. + % + % - object_keys (Data) - A table for identifying which objects use which keys. + % + % - objects (Data) - A table for identifying which objects in a file contain references to external resources. 
+ % + % Output Arguments: + % - hERD (types.hdmf_common.HERD) - A HERD object + + obj = obj@types.hdmf_common.Container(varargin{:}); + + + p = inputParser; + p.KeepUnmatched = true; + p.PartialMatching = false; + p.StructExpand = false; + addParameter(p, 'entities',[]); + addParameter(p, 'entity_keys',[]); + addParameter(p, 'files',[]); + addParameter(p, 'keys',[]); + addParameter(p, 'object_keys',[]); + addParameter(p, 'objects',[]); + misc.parseSkipInvalidName(p, varargin); + obj.entities = p.Results.entities; + obj.entity_keys = p.Results.entity_keys; + obj.files = p.Results.files; + obj.keys = p.Results.keys; + obj.object_keys = p.Results.object_keys; + obj.objects = p.Results.objects; + if strcmp(class(obj), 'types.hdmf_common.HERD') + cellStringArguments = convertContainedStringsToChars(varargin(1:2:end)); + types.util.checkUnset(obj, unique(cellStringArguments)); + end + end + %% SETTERS + function set.entities(obj, val) + obj.entities = obj.validate_entities(val); + end + function set.entity_keys(obj, val) + obj.entity_keys = obj.validate_entity_keys(val); + end + function set.files(obj, val) + obj.files = obj.validate_files(val); + end + function set.keys(obj, val) + obj.keys = obj.validate_keys(val); + end + function set.object_keys(obj, val) + obj.object_keys = obj.validate_object_keys(val); + end + function set.objects(obj, val) + obj.objects = obj.validate_objects(val); + end + %% VALIDATORS + + function val = validate_entities(obj, val) + types.util.checkType('entities', 'types.hdmf_common.Data', val); + if ~isempty(val) + [val, originalVal] = types.util.unwrapValue(val); + if isempty(val) + % skip validation for empty values + else + vprops = struct(); + vprops.entity_id = 'char'; + vprops.entity_uri = 'char'; + val = types.util.checkDtype('entities', vprops, val); + end + types.util.validateShape('entities', {[Inf]}, val) + val = types.util.rewrapValue(val, originalVal); + end + end + function val = validate_entity_keys(obj, val) + 
types.util.checkType('entity_keys', 'types.hdmf_common.Data', val); + if ~isempty(val) + [val, originalVal] = types.util.unwrapValue(val); + if isempty(val) + % skip validation for empty values + else + vprops = struct(); + vprops.entities_idx = 'uint'; + vprops.keys_idx = 'uint'; + val = types.util.checkDtype('entity_keys', vprops, val); + end + types.util.validateShape('entity_keys', {[Inf]}, val) + val = types.util.rewrapValue(val, originalVal); + end + end + function val = validate_files(obj, val) + types.util.checkType('files', 'types.hdmf_common.Data', val); + if ~isempty(val) + [val, originalVal] = types.util.unwrapValue(val); + if isempty(val) + % skip validation for empty values + else + vprops = struct(); + vprops.file_object_id = 'char'; + val = types.util.checkDtype('files', vprops, val); + end + types.util.validateShape('files', {[Inf]}, val) + val = types.util.rewrapValue(val, originalVal); + end + end + function val = validate_keys(obj, val) + types.util.checkType('keys', 'types.hdmf_common.Data', val); + if ~isempty(val) + [val, originalVal] = types.util.unwrapValue(val); + if isempty(val) + % skip validation for empty values + else + vprops = struct(); + vprops.key = 'char'; + val = types.util.checkDtype('keys', vprops, val); + end + types.util.validateShape('keys', {[Inf]}, val) + val = types.util.rewrapValue(val, originalVal); + end + end + function val = validate_object_keys(obj, val) + types.util.checkType('object_keys', 'types.hdmf_common.Data', val); + if ~isempty(val) + [val, originalVal] = types.util.unwrapValue(val); + if isempty(val) + % skip validation for empty values + else + vprops = struct(); + vprops.objects_idx = 'uint'; + vprops.keys_idx = 'uint'; + val = types.util.checkDtype('object_keys', vprops, val); + end + types.util.validateShape('object_keys', {[Inf]}, val) + val = types.util.rewrapValue(val, originalVal); + end + end + function val = validate_objects(obj, val) + types.util.checkType('objects', 'types.hdmf_common.Data', 
val); + if ~isempty(val) + [val, originalVal] = types.util.unwrapValue(val); + if isempty(val) + % skip validation for empty values + else + vprops = struct(); + vprops.files_idx = 'uint'; + vprops.object_id = 'char'; + vprops.object_type = 'char'; + vprops.relative_path = 'char'; + vprops.field = 'char'; + val = types.util.checkDtype('objects', vprops, val); + end + types.util.validateShape('objects', {[Inf]}, val) + val = types.util.rewrapValue(val, originalVal); + end + end + %% EXPORT + function refs = export(obj, fid, fullpath, refs) + refs = export@types.hdmf_common.Container(obj, fid, fullpath, refs); + if any(strcmp(refs, fullpath)) + return; + end + refs = obj.entities.export(fid, [fullpath '/entities'], refs); + refs = obj.entity_keys.export(fid, [fullpath '/entity_keys'], refs); + refs = obj.files.export(fid, [fullpath '/files'], refs); + refs = obj.keys.export(fid, [fullpath '/keys'], refs); + refs = obj.object_keys.export(fid, [fullpath '/object_keys'], refs); + refs = obj.objects.export(fid, [fullpath '/objects'], refs); + end +end + +end \ No newline at end of file diff --git a/+types/+hdmf_common/MeaningsTable.m b/+types/+hdmf_common/MeaningsTable.m new file mode 100644 index 000000000..f09088745 --- /dev/null +++ b/+types/+hdmf_common/MeaningsTable.m @@ -0,0 +1,104 @@ +classdef MeaningsTable < types.hdmf_common.DynamicTable & types.untyped.GroupClass +% MEANINGSTABLE - A table to store information about the meanings of values in a linked VectorData object. All possible values of the linked VectorData object should be present in the 'value' column of this table, even if the value is not observed in the data. Additional columns may be added to store additional metadata about each value. The name of the MeaningsTable should correspond to the name of the linked VectorData object with a "_meanings" suffix. e.g., if the linked VectorData object is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". 
+% +% Required Properties: +% colnames, description, id, meaning, target, value + + +% REQUIRED PROPERTIES +properties + meaning; % REQUIRED (VectorData) The meaning of the value in the linked VectorData object. + target; % REQUIRED VectorData + value; % REQUIRED (VectorData) The value of a row in the linked VectorData object. +end + +methods + function obj = MeaningsTable(varargin) + % MEANINGSTABLE - Constructor for MeaningsTable + % + % Syntax: + % meaningsTable = types.hdmf_common.MEANINGSTABLE() creates a MeaningsTable object with unset property values. + % + % meaningsTable = types.hdmf_common.MEANINGSTABLE(Name, Value) creates a MeaningsTable object where one or more property values are specified using name-value pairs. + % + % Input Arguments (Name-Value Arguments): + % - colnames (char) - The names of the columns in this table. This should be used to specify an order to the columns. + % + % - description (char) - Description of what is in this dynamic table. + % + % - id (ElementIdentifiers) - Array of unique identifiers for the rows of this dynamic table. + % + % - meaning (VectorData) - The meaning of the value in the linked VectorData object. + % + % - meanings_tables (MeaningsTable) - MeaningsTable objects that provide meanings for values in VectorData columns within this DynamicTable. Tables should be named according to the column they provide meanings for with a "_meanings" suffix. e.g., if a VectorData column is named "stimulus_type", the corresponding MeaningsTable should be named "stimulus_type_meanings". + % + % - target (VectorData) - Link to the VectorData object for which this table provides meanings. + % + % - value (VectorData) - The value of a row in the linked VectorData object. + % + % - vectordata (VectorData) - Vector columns, including index columns, of this dynamic table. 
+ % + % Output Arguments: + % - meaningsTable (types.hdmf_common.MeaningsTable) - A MeaningsTable object + + obj = obj@types.hdmf_common.DynamicTable(varargin{:}); + + + p = inputParser; + p.KeepUnmatched = true; + p.PartialMatching = false; + p.StructExpand = false; + addParameter(p, 'meaning',[]); + addParameter(p, 'target',[]); + addParameter(p, 'value',[]); + misc.parseSkipInvalidName(p, varargin); + obj.meaning = p.Results.meaning; + obj.target = p.Results.target; + obj.value = p.Results.value; + if strcmp(class(obj), 'types.hdmf_common.MeaningsTable') + cellStringArguments = convertContainedStringsToChars(varargin(1:2:end)); + types.util.checkUnset(obj, unique(cellStringArguments)); + end + if strcmp(class(obj), 'types.hdmf_common.MeaningsTable') + types.util.dynamictable.checkConfig(obj); + end + end + %% SETTERS + function set.meaning(obj, val) + obj.meaning = obj.validate_meaning(val); + end + function set.target(obj, val) + obj.target = obj.validate_target(val); + end + function set.value(obj, val) + obj.value = obj.validate_value(val); + end + %% VALIDATORS + + function val = validate_meaning(obj, val) + types.util.checkType('meaning', 'types.hdmf_common.VectorData', val); + if ~isempty(val) + [val, originalVal] = types.util.unwrapValue(val); + val = types.util.checkDtype('meaning', 'char', val); + val = types.util.rewrapValue(val, originalVal); + end + end + function val = validate_target(obj, val) + val = types.util.validateSoftLink('target', val, 'types.hdmf_common.VectorData'); + end + function val = validate_value(obj, val) + types.util.checkType('value', 'types.hdmf_common.VectorData', val); + end + %% EXPORT + function refs = export(obj, fid, fullpath, refs) + refs = export@types.hdmf_common.DynamicTable(obj, fid, fullpath, refs); + if any(strcmp(refs, fullpath)) + return; + end + refs = obj.meaning.export(fid, [fullpath '/meaning'], refs); + refs = obj.target.export(fid, [fullpath '/target'], refs); + refs = obj.value.export(fid, [fullpath 
'/value'], refs); + end +end + +end \ No newline at end of file diff --git a/+types/+hdmf_common/VectorIndex.m b/+types/+hdmf_common/VectorIndex.m index 839986a9a..4815d33c5 100644 --- a/+types/+hdmf_common/VectorIndex.m +++ b/+types/+hdmf_common/VectorIndex.m @@ -1,5 +1,5 @@ classdef VectorIndex < types.hdmf_common.VectorData & types.untyped.DatasetClass -% VECTORINDEX - Used with VectorData to encode a ragged array. An array of indices into the first dimension of the target VectorData, and forming a map between the rows of a DynamicTable and the indices of the VectorData. The name of the VectorIndex is expected to be the name of the target VectorData object followed by "_index". +% VECTORINDEX - Used with VectorData to encode a ragged array. An array of indices into the first dimension of the target VectorData, forming a map between the rows of a DynamicTable and the indices of the VectorData. The name of the VectorIndex is expected to be the name of the target VectorData object followed by "_index". % % Required Properties: % data, description, target diff --git a/+types/+hdmf_common/Version.m b/+types/+hdmf_common/Version.m index 2eacc64f9..f257e5088 100644 --- a/+types/+hdmf_common/Version.m +++ b/+types/+hdmf_common/Version.m @@ -1,3 +1,3 @@ function version = Version() - version = '1.8.0'; + version = '1.9.0'; end \ No newline at end of file diff --git a/+types/+hdmf_experimental/EnumData.m b/+types/+hdmf_experimental/EnumData.m index d4e741a4d..a696438d5 100644 --- a/+types/+hdmf_experimental/EnumData.m +++ b/+types/+hdmf_experimental/EnumData.m @@ -7,7 +7,7 @@ % REQUIRED PROPERTIES properties - elements; % REQUIRED (Object reference to VectorData) Reference to the VectorData object that contains the enumerable elements + elements; % REQUIRED (Object reference to VectorData) Reference to the VectorData object that contains the enumerable elements. end methods @@ -24,7 +24,7 @@ % % - description (char) - Description of what these vectors represent. 
% - % - elements (Object reference to VectorData) - Reference to the VectorData object that contains the enumerable elements + % - elements (Object reference to VectorData) - Reference to the VectorData object that contains the enumerable elements. % % Output Arguments: % - enumData (types.hdmf_experimental.EnumData) - A EnumData object diff --git a/+types/+hdmf_experimental/Version.m b/+types/+hdmf_experimental/Version.m index 67df84269..9260759af 100644 --- a/+types/+hdmf_experimental/Version.m +++ b/+types/+hdmf_experimental/Version.m @@ -1,3 +1,3 @@ function version = Version() - version = '0.5.0'; + version = '0.6.0'; end \ No newline at end of file diff --git a/nwb-schema/2.10.0/core/nwb.base.yaml b/nwb-schema/2.10.0/core/nwb.base.yaml new file mode 100644 index 000000000..ce0a3fa0a --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.base.yaml @@ -0,0 +1,287 @@ +datasets: +- neurodata_type_def: NWBData + neurodata_type_inc: Data + doc: An abstract data type for a dataset. + +- neurodata_type_def: TimeSeriesReferenceVectorData + neurodata_type_inc: VectorData + default_name: timeseries + dtype: + - name: idx_start + dtype: int32 + doc: Start index into the TimeSeries 'data' and 'timestamp' datasets of the referenced + TimeSeries. The first dimension of those arrays is always time. + - name: count + dtype: int32 + doc: Number of data samples available in this time series, during this epoch + - name: timeseries + dtype: + target_type: TimeSeries + reftype: object + doc: The TimeSeries that this index applies to + doc: Column storing references to a TimeSeries (rows). For each TimeSeries this + VectorData column stores the start_index and count to indicate the range in time + to be selected as well as an object reference to the TimeSeries. + +- neurodata_type_def: BaseImage + neurodata_type_inc: NWBData + doc: An abstract base type for image data. Parent type for Image and ExternalImage types. 
+ attributes: + - name: description + dtype: text + doc: Description of the image. + required: false + +- neurodata_type_def: Image + neurodata_type_inc: BaseImage + dtype: numeric + dims: + - - x + - y + - - x + - y + - r, g, b + - - x + - y + - r, g, b, a + shape: + - - null + - null + - - null + - null + - 3 + - - null + - null + - 4 + doc: A type for storing image data directly. Shape can be 2-D (x, y), or 3-D where the + third dimension can have three or four elements, e.g. (x, y, (r, g, b)) or + (x, y, (r, g, b, a)). + attributes: + - name: resolution + dtype: float32 + doc: Pixel resolution of the image, in pixels per centimeter. + required: false + +- neurodata_type_def: ExternalImage + neurodata_type_inc: BaseImage + doc: A type for referencing an external image file. The single file path or URI to the + external image file should be stored in the dataset. This type should NOT be used if + the image is stored in another NWB file and that file is linked to this file. + dtype: text + # shape: scalar # this will be supported in the NWB schema language 2.0 + attributes: + - name: image_mode + dtype: text + doc: Image mode (color mode) of the image, e.g., "RGB", "RGBA", "grayscale", and "LA". + required: false + - name: image_format + dtype: text + doc: Common name of the image file format. Only widely readable, open file formats are allowed. + Allowed values are "PNG", "JPEG", and "GIF". + required: true + +- neurodata_type_def: ImageReferences + neurodata_type_inc: NWBData + dtype: + target_type: BaseImage + reftype: object + dims: + - num_images + shape: + - null + doc: Ordered dataset of references to BaseImage (e.g., Image or ExternalImage) objects. + +groups: +- neurodata_type_def: NWBContainer + neurodata_type_inc: Container + doc: An abstract data type for a generic container storing collections of data and + metadata. Base type for all data and metadata containers. 
+ +- neurodata_type_def: NWBDataInterface + neurodata_type_inc: NWBContainer + doc: An abstract data type for a generic container storing collections of data, + as opposed to metadata. + +- neurodata_type_def: TimeSeries + neurodata_type_inc: NWBDataInterface + doc: General purpose time series. + attributes: + - name: description + dtype: text + default_value: no description + doc: Description of the time series. + required: false + - name: comments + dtype: text + default_value: no comments + doc: Human-readable comments about the TimeSeries. This second descriptive field + can be used to store additional information, or descriptive information if the + primary description field is populated with a computer-readable string. + required: false + datasets: + - name: data + dims: + - - num_times + - - num_times + - num_DIM2 + - - num_times + - num_DIM2 + - num_DIM3 + - - num_times + - num_DIM2 + - num_DIM3 + - num_DIM4 + shape: + - - null + - - null + - null + - - null + - null + - null + - - null + - null + - null + - null + doc: Data values. Data can be in 1-D, 2-D, 3-D, or 4-D. The first dimension + should always represent time. This can also be used to store binary data + (e.g., image frames). This can also be a link to data stored in an external file. + attributes: + - name: conversion + dtype: float32 + default_value: 1.0 + doc: Scalar to multiply each element in data to convert it to the specified 'unit'. + If the data are stored in acquisition system units or other units + that require a conversion to be interpretable, multiply the data by 'conversion' + to convert the data to the specified 'unit'. e.g. if the data acquisition system + stores values in this object as signed 16-bit integers (int16 range + -32,768 to 32,767) that correspond to a 5V range (-2.5V to 2.5V), and the data + acquisition system gain is 8000X, then the 'conversion' multiplier to get from + raw data acquisition values to recorded volts is 2.5/32768/8000 = 9.5367e-9. 
+ required: false + - name: offset + dtype: float32 + default_value: 0.0 + doc: Scalar to add to the data after scaling by 'conversion' to finalize its coercion + to the specified 'unit'. Two common examples of this include (a) data stored in an + unsigned type that requires a shift after scaling to re-center the data, + and (b) specialized recording devices that naturally cause a scalar offset with + respect to the true units. + required: false + - name: resolution + dtype: float32 + default_value: -1.0 + doc: Smallest meaningful difference between values in data, stored in the specified + by unit, e.g., the change in value of the least significant bit, or a larger + number if signal noise is known to be present. If unknown, use -1.0. + required: false + - name: unit + dtype: text + doc: Base unit of measurement for working with the data. Actual stored values are + not necessarily stored in these units. To access the data in these units, + multiply 'data' by 'conversion' and add 'offset'. + - name: continuity + dtype: text + doc: Optionally describe the continuity of the data. Can be "continuous", "instantaneous", or + "step". For example, a voltage trace would be "continuous", because samples + are recorded from a continuous process. An array of lick times would be "instantaneous", + because the data represents distinct moments in time. Times of image presentations would be + "step" because the picture remains the same until the next timepoint. This field is optional, + but is useful in providing information about the underlying data. It may inform the way this + data is interpreted, the way it is visualized, and what analysis methods are applicable. + required: false + - name: starting_time + dtype: float64 + doc: Timestamp of the first sample in seconds. When timestamps are uniformly + spaced, the timestamp of the first sample can be specified and all subsequent + ones calculated from the sampling rate attribute. + quantity: '?' 
+ attributes: + - name: rate + dtype: float32 + doc: Sampling rate, in Hz. + - name: unit + dtype: text + value: seconds + doc: Unit of measurement for time, which is fixed to 'seconds'. + - name: timestamps + dtype: float64 + dims: + - num_times + shape: + - null + doc: Timestamps for samples stored in data, in seconds, relative to the + common experiment master-clock stored in NWBFile.timestamps_reference_time. + quantity: '?' + attributes: + - name: interval + dtype: int32 + value: 1 + doc: Value is '1' + - name: unit + dtype: text + value: seconds + doc: Unit of measurement for timestamps, which is fixed to 'seconds'. + - name: control + dtype: uint8 + dims: + - num_times + shape: + - null + doc: Numerical labels that apply to each time point in data for the purpose of + querying and slicing data by these values. If present, the length of this + array should be the same size as the first dimension of data. + quantity: '?' + - name: control_description + dtype: text + dims: + - num_control_values + shape: + - null + doc: Description of each control value. Must be present if control is present. + If present, control_description[0] should describe time points where control == 0. + quantity: '?' + groups: + - name: sync + doc: Lab-specific time and sync information as provided directly from hardware + devices and that is necessary for aligning all acquired time information to + a common timebase. The timestamp array stores time in the common timebase. + This group will usually only be populated in TimeSeries that are + stored external to the NWB file, in files storing raw data. Once timestamp + data is calculated, the contents of 'sync' are mostly for archival purposes. + quantity: '?' + +- neurodata_type_def: ProcessingModule + neurodata_type_inc: NWBContainer + doc: A collection of processed data. + attributes: + - name: description + dtype: text + doc: Description of this collection of processed data. 
+ groups: + - neurodata_type_inc: NWBDataInterface + doc: Data objects stored in this collection. + quantity: '*' + - neurodata_type_inc: DynamicTable + doc: Tables stored in this collection. + quantity: '*' + +- neurodata_type_def: Images + neurodata_type_inc: NWBDataInterface + default_name: Images + doc: A collection of images with an optional way to specify the order of the images + using the "order_of_images" dataset. An order must be specified if the images are + referenced by index, e.g., from an IndexSeries. + attributes: + - name: description + dtype: text + doc: Description of this collection of images. + datasets: + - neurodata_type_inc: BaseImage + doc: Images stored in this collection. + quantity: '+' + - name: order_of_images + neurodata_type_inc: ImageReferences + doc: Ordered dataset of references to BaseImage objects stored in the parent group. + Each object in the Images group should be stored once and only once, so + the dataset should have the same length as the number of images. + quantity: '?' diff --git a/nwb-schema/2.10.0/core/nwb.behavior.yaml b/nwb-schema/2.10.0/core/nwb.behavior.yaml new file mode 100644 index 000000000..5e4e2bad6 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.behavior.yaml @@ -0,0 +1,124 @@ +groups: +- neurodata_type_def: SpatialSeries + neurodata_type_inc: TimeSeries + doc: "Direction, e.g., of gaze or travel, or position. The TimeSeries::data field\ + \ is a 2D array storing position or direction relative to some reference frame.\ + \ Array structure: [num measurements] [num dimensions]. Each SpatialSeries has\ + \ a text dataset reference_frame that indicates the zero-position, or the zero-axes\ + \ for direction. For example, if representing gaze direction, 'straight-ahead'\ + \ might be a specific pixel on the monitor, or some other point in space. For\ + \ position data, the 0,0 point might be the top-left corner of an enclosure, as\ + \ viewed from the tracking camera. 
The unit of data will indicate how to interpret\ + \ SpatialSeries values." + datasets: + - name: data + dtype: numeric + dims: + - - num_times + - - num_times + - x + - - num_times + - x,y + - - num_times + - x,y,z + shape: + - - null + - - null + - 1 + - - null + - 2 + - - null + - 3 + doc: 1-D or 2-D array storing position or direction relative to some reference frame. + attributes: + - name: unit + dtype: text + default_value: meters + doc: Base unit of measurement for working with the data. The default value + is 'meters'. Actual stored values are not necessarily stored in these units. + To access the data in these units, multiply 'data' by 'conversion' and add 'offset'. + required: false + - name: reference_frame + dtype: text + doc: Description defining what exactly 'straight-ahead' means. + quantity: '?' + +- neurodata_type_def: BehavioralEpochs + neurodata_type_inc: NWBDataInterface + default_name: BehavioralEpochs + doc: TimeSeries for storing behavioral epochs. The objective of this and the other + two Behavioral interfaces (e.g. BehavioralEvents and BehavioralTimeSeries) is + to provide generic hooks for software tools/scripts. This allows a tool/script + to take the output one specific interface (e.g., UnitTimes) and plot that data + relative to another data modality (e.g., behavioral events) without having to + define all possible modalities in advance. Declaring one of these interfaces means + that one or more TimeSeries of the specified type is published. These TimeSeries + should reside in a group having the same name as the interface. For example, if + a BehavioralTimeSeries interface is declared, the module will have one or more + TimeSeries defined in the module sub-group 'BehavioralTimeSeries'. BehavioralEpochs + should use IntervalSeries. BehavioralEvents is used for irregular events. BehavioralTimeSeries + is for continuous data. 
+ groups: + - neurodata_type_inc: IntervalSeries + doc: IntervalSeries object containing start and stop times of epochs. + quantity: '+' + +- neurodata_type_def: BehavioralEvents + neurodata_type_inc: NWBDataInterface + default_name: BehavioralEvents + doc: TimeSeries for storing behavioral events. See description of BehavioralEpochs + for more details. + groups: + - neurodata_type_inc: TimeSeries + doc: TimeSeries object containing behavioral events. + quantity: '+' + +- neurodata_type_def: BehavioralTimeSeries + neurodata_type_inc: NWBDataInterface + default_name: BehavioralTimeSeries + doc: TimeSeries for storing behavioral time series data. See description of BehavioralEpochs + for more details. + groups: + - neurodata_type_inc: TimeSeries + doc: TimeSeries object containing continuous behavioral data. + quantity: '+' + +- neurodata_type_def: PupilTracking + neurodata_type_inc: NWBDataInterface + default_name: PupilTracking + doc: Eye-tracking data, representing pupil size. + groups: + - neurodata_type_inc: TimeSeries + doc: TimeSeries object containing time series data on pupil size. + quantity: '+' + +- neurodata_type_def: EyeTracking + neurodata_type_inc: NWBDataInterface + default_name: EyeTracking + doc: Eye-tracking data, representing direction of gaze. + groups: + - neurodata_type_inc: SpatialSeries + doc: SpatialSeries object containing data measuring direction of gaze. + quantity: '+' + +- neurodata_type_def: CompassDirection + neurodata_type_inc: NWBDataInterface + default_name: CompassDirection + doc: With a CompassDirection interface, a module publishes a SpatialSeries object + representing a floating point value for theta. The SpatialSeries::reference_frame + field should indicate what direction corresponds to 0 and which is the direction + of rotation (this should be clockwise). The si_unit for the SpatialSeries should + be radians or degrees. 
+ groups: + - neurodata_type_inc: SpatialSeries + doc: SpatialSeries object containing direction of gaze travel. + quantity: '+' + +- neurodata_type_def: Position + neurodata_type_inc: NWBDataInterface + default_name: Position + doc: Position data, whether along the x, x/y or x/y/z axis. + groups: + - neurodata_type_inc: SpatialSeries + doc: SpatialSeries object containing position data. + quantity: '+' diff --git a/nwb-schema/2.10.0/core/nwb.device.yaml b/nwb-schema/2.10.0/core/nwb.device.yaml new file mode 100644 index 000000000..5d343a14b --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.device.yaml @@ -0,0 +1,58 @@ +groups: +- neurodata_type_def: Device + neurodata_type_inc: NWBContainer + doc: Metadata about a specific instance of a data acquisition device, e.g., recording system, electrode, microscope. + Link to a DeviceModel.model to represent information about the model of the device. + attributes: + - name: description + dtype: text + doc: Description of the device as free-form text. If there is any software/firmware associated + with the device, the names and versions of those can be added to NWBFile.was_generated_by. + required: false + - name: manufacturer + dtype: text + doc: DEPRECATED. The name of the manufacturer of the device, e.g., Imec, Plexon, Thorlabs. + Instead of using this field, store the value in DeviceModel.manufacturer and link to that + DeviceModel from this Device. + required: false + - name: model_number + dtype: text + doc: DEPRECATED. The model number (or part/product number) of the device, e.g., PRB_1_4_0480_1, + PLX-VP-32-15SE(75)-(260-80)(460-10)-300-(1)CON/32m-V, BERGAMO. + Instead of using this field, store the value in DeviceModel.model_number and link to that + DeviceModel from this Device. + required: false + - name: model_name + dtype: text + doc: DEPRECATED. The model name of the device, e.g., Neuropixels 1.0, V-Probe, Bergamo III. 
+ Instead of using this field, create and add a new DeviceModel named the model name and + link to that DeviceModel from this Device. + required: false + - name: serial_number + dtype: text + doc: The serial number of the device. + required: false + links: + - name: model + target_type: DeviceModel + doc: The model of the device. + quantity: '?' +- neurodata_type_def: DeviceModel + neurodata_type_inc: NWBContainer + doc: Model properties of a data acquisition device, e.g., recording system, electrode, microscope. + This should be extended for specific types of device models to include additional attributes specific to each type. + The name of the DeviceModel should be the most common representation of the model name, e.g., + Neuropixels 1.0, V-Probe, Bergamo III. + attributes: + - name: manufacturer + dtype: text + doc: The name of the manufacturer of the device model, e.g., Imec, Plexon, Thorlabs. + - name: model_number + dtype: text + doc: The model number (or part/product number) of the device, e.g., PRB_1_4_0480_1, + PLX-VP-32-15SE(75)-(260-80)(460-10)-300-(1)CON/32m-V, BERGAMO. + required: false + - name: description + dtype: text + doc: Description of the device model as free-form text. + required: false diff --git a/nwb-schema/2.10.0/core/nwb.ecephys.yaml b/nwb-schema/2.10.0/core/nwb.ecephys.yaml new file mode 100644 index 000000000..2e7cc62e1 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.ecephys.yaml @@ -0,0 +1,410 @@ +groups: +- neurodata_type_def: ElectricalSeries + neurodata_type_inc: TimeSeries + doc: A time series of acquired voltage data from extracellular recordings. + The data field is an int or float array storing data in volts. The first + dimension should always represent time. The second dimension, if present, + should represent channels. + attributes: + - name: filtering + dtype: text + doc: Filtering applied to all channels of the data. 
For example, if this ElectricalSeries represents + high-pass-filtered data (also known as AP Band), then this value could be "High-pass 4-pole Bessel filter + at 500 Hz". If this ElectricalSeries represents low-pass-filtered LFP data and the type of filter is unknown, + then this value could be "Low-pass filter at 300 Hz". If a non-standard filter type is used, provide as much + detail about the filter properties as possible. + required: false + datasets: + - name: data + dtype: numeric + dims: + - - num_times + - - num_times + - num_channels + - - num_times + - num_channels + - num_samples + shape: + - - null + - - null + - null + - - null + - null + - null + doc: Recorded voltage data. + attributes: + - name: unit + dtype: text + value: volts + doc: Base unit of measurement for working with the data. This value is fixed to + 'volts'. Actual stored values are not necessarily stored in these units. To + access the data in these units, multiply 'data' by 'conversion', followed by + 'channel_conversion' (if present), and then add 'offset'. + - name: electrodes + neurodata_type_inc: DynamicTableRegion + doc: DynamicTableRegion pointer to the electrodes that this time series was generated from. + - name: channel_conversion + dtype: float32 + dims: + - num_channels + shape: + - null + doc: Channel-specific conversion factor. Multiply the data in the 'data' dataset by these + values along the channel axis (as indicated by axis attribute) AND by the global + conversion factor in the 'conversion' attribute of 'data' to get the data values in + Volts, i.e, data in Volts = data * data.conversion * channel_conversion. This + approach allows for both global and per-channel data conversion factors needed + to support the storage of electrical recordings as native values generated by data + acquisition systems. If this dataset is not present, then there is no channel-specific + conversion factor, i.e. it is 1 for all channels. + quantity: '?' 
+ attributes: + - name: axis + dtype: int32 + value: 1 + doc: The zero-indexed axis of the 'data' dataset that the channel-specific conversion + factor corresponds to. This value is fixed to 1. + +- neurodata_type_def: SpikeEventSeries + neurodata_type_inc: ElectricalSeries + doc: "Stores snapshots/snippets of recorded spike events (i.e., threshold crossings). This + may also be raw data, as reported by ephys hardware. If so, the TimeSeries::description + field should describe how events were detected. All events span the same recording + channels and store + snapshots of equal duration. TimeSeries::data array structure: [num events] + [num channels] [num samples] (or [num events] [num samples] for single electrode)." + datasets: + - name: data + dtype: numeric + dims: + - - num_events + - num_samples + - - num_events + - num_channels + - num_samples + shape: + - - null + - null + - - null + - null + - null + doc: Spike waveforms. + attributes: + - name: unit + dtype: text + value: volts + doc: Unit of measurement for waveforms, which is fixed to 'volts'. + - name: timestamps + dtype: float64 + dims: + - num_times + shape: + - null + doc: Timestamps for samples stored in data, in seconds, relative to the + common experiment master-clock stored in NWBFile.timestamps_reference_time. + Timestamps are required for the events. Unlike for TimeSeries, timestamps are + required for SpikeEventSeries and are thus re-specified here. + quantity: 1 + attributes: + - name: interval + dtype: int32 + value: 1 + doc: Value is '1' + - name: unit + dtype: text + value: seconds + doc: Unit of measurement for timestamps, which is fixed to 'seconds'. + +- neurodata_type_def: FeatureExtraction + neurodata_type_inc: NWBDataInterface + default_name: FeatureExtraction + doc: Features, such as PC1 and PC2, that are extracted from signals stored in a + SpikeEventSeries or other source. 
+ datasets: + - name: description + dtype: text + dims: + - num_features + shape: + - null + doc: Description of features (eg, ''PC1'') for each of the extracted features. + - name: features + dtype: float32 + dims: + - num_events + - num_channels + - num_features + shape: + - null + - null + - null + doc: Multi-dimensional array of features extracted from each event. + - name: times + dtype: float64 + dims: + - num_events + shape: + - null + doc: Times of events that features correspond to (can be a link). + - name: electrodes + neurodata_type_inc: DynamicTableRegion + doc: DynamicTableRegion pointer to the electrodes that this time series was generated from. + +- neurodata_type_def: EventDetection + neurodata_type_inc: NWBDataInterface + default_name: EventDetection + doc: Detected spike events from voltage trace(s). + datasets: + - name: detection_method + dtype: text + doc: Description of how events were detected, such as voltage threshold, or dV/dT + threshold, as well as relevant values. + - name: source_idx + dtype: int32 + dims: + - - num_events + - - num_events + - time_index, channel_index + shape: + - - null + - - null + - 2 + doc: Indices (zero-based) into the linked source ElectricalSeries::data array corresponding + to time of event or time and channel of event. ''description'' should define what is meant + by time of event (e.g., .25 ms before action potential peak, zero-crossing time, etc). + The index points to each event from the raw data. + - name: times + dtype: float64 + dims: + - num_events + shape: + - null + doc: DEPRECATED. Timestamps of events, in seconds. + attributes: + - name: unit + dtype: text + value: seconds + doc: Unit of measurement for event times, which is fixed to 'seconds'. + quantity: '?' + links: + - name: source_electricalseries + target_type: ElectricalSeries + doc: Link to the ElectricalSeries that this data was calculated from. 
Metadata + about electrodes and their position can be read from that ElectricalSeries so + it's not necessary to include that information here. + +- neurodata_type_def: EventWaveform + neurodata_type_inc: NWBDataInterface + default_name: EventWaveform + doc: DEPRECATED. Represents either the waveforms of detected events, as extracted from a raw + data trace in /acquisition, or the event waveforms that were stored during experiment + acquisition. + groups: + - neurodata_type_inc: SpikeEventSeries + doc: SpikeEventSeries object(s) containing detected spike event waveforms. + quantity: '+' + +- neurodata_type_def: FilteredEphys + neurodata_type_inc: NWBDataInterface + default_name: FilteredEphys + doc: Electrophysiology data from one or more channels that has been subjected to filtering. + Examples of filtered data include Theta and Gamma (LFP has its own interface). + FilteredEphys modules publish an ElectricalSeries for each filtered channel or + set of channels. The name of each ElectricalSeries is arbitrary but should be + informative. The source of the filtered data, whether this is from analysis of + another time series or as acquired by hardware, should be noted in each's TimeSeries::description + field. There is no assumed 1::1 correspondence between filtered ephys signals + and electrodes, as a single signal can apply to many nearby electrodes, and one + electrode may have different filtered (e.g., theta and/or gamma) signals represented. + Filter properties should be noted in the ElectricalSeries 'filtering' attribute. + groups: + - neurodata_type_inc: ElectricalSeries + doc: ElectricalSeries object(s) containing filtered electrophysiology data. + quantity: '+' + +- neurodata_type_def: LFP + neurodata_type_inc: NWBDataInterface + default_name: LFP + doc: LFP data from one or more channels. The electrode map in each published ElectricalSeries + will identify which channels are providing LFP data. 
Filter properties should + be noted in the ElectricalSeries 'filtering' attribute. + groups: + - neurodata_type_inc: ElectricalSeries + doc: ElectricalSeries object(s) containing LFP data for one or more channels. + quantity: '+' + +- neurodata_type_def: ElectrodeGroup + neurodata_type_inc: NWBContainer + doc: A physical grouping of electrodes, e.g., a shank of an array. An electrode group is typically used to describe + electrodes that are physically connected on a single device and are often (but not always) used together for + analysis, such as for spike sorting. Note that this is descriptive metadata; electrodes from different groups + can still be spike-sorted together if needed. + attributes: + - name: description + dtype: text + doc: Description of this electrode group. + - name: location + dtype: text + doc: Location of electrode group. Specify the area, layer, comments on estimation + of area/layer, etc. Use standard atlas names for anatomical regions when possible. + datasets: + - name: position + dtype: + - name: x + dtype: float32 + doc: x coordinate + - name: y + dtype: float32 + doc: y coordinate + - name: z + dtype: float32 + doc: z coordinate + doc: stereotaxic or common framework coordinates + quantity: '?' + links: + - name: device + target_type: Device + doc: Link to the device that was used to record from this electrode group. + +- neurodata_type_def: ElectrodesTable + neurodata_type_inc: DynamicTable + doc: A table of all electrodes (i.e. channels) used for recording. Introduced in NWB 2.8.0. Replaces the "electrodes" + table (neurodata_type_inc DynamicTable, no neurodata_type_def) that is part of NWBFile. + datasets: + - name: location + neurodata_type_inc: VectorData + dtype: text + doc: Location of the electrode (channel). Specify the area, layer, comments + on estimation of area/layer, stereotaxic coordinates if in vivo, etc. Use + standard atlas names for anatomical regions when possible. 
+ - name: group + neurodata_type_inc: VectorData + dtype: + target_type: ElectrodeGroup + reftype: object + doc: Reference to the ElectrodeGroup this electrode is a part of. + - name: group_name + neurodata_type_inc: VectorData + dtype: text + doc: Name of the ElectrodeGroup this electrode is a part of. + quantity: '?' + - name: x + neurodata_type_inc: VectorData + dtype: float32 + doc: x coordinate of the channel location in the brain (+x is posterior). Units should be specified in microns. + quantity: '?' + - name: y + neurodata_type_inc: VectorData + dtype: float32 + doc: y coordinate of the channel location in the brain (+y is inferior). Units should be specified in microns. + quantity: '?' + - name: z + neurodata_type_inc: VectorData + dtype: float32 + doc: z coordinate of the channel location in the brain (+z is right). Units should be specified in microns. + quantity: '?' + - name: imp + neurodata_type_inc: VectorData + dtype: float32 + doc: Impedance of the channel, in ohms. + quantity: '?' + - name: filtering + neurodata_type_inc: VectorData + dtype: text + doc: Description of hardware filtering, including the filter name and frequency cutoffs. + quantity: '?' + - name: rel_x + neurodata_type_inc: VectorData + dtype: float32 + doc: x coordinate in electrode group. Units should be specified in microns. + quantity: '?' + - name: rel_y + neurodata_type_inc: VectorData + dtype: float32 + doc: y coordinate in electrode group. Units should be specified in microns. + quantity: '?' + - name: rel_z + neurodata_type_inc: VectorData + dtype: float32 + doc: z coordinate in electrode group. Units should be specified in microns. + quantity: '?' + - name: reference + neurodata_type_inc: VectorData + dtype: text + doc: Description of the reference electrode and/or reference scheme used for this electrode, e.g., + "stainless steel skull screw" or "online common average referencing". + quantity: '?' 
+ +# The types below have been deprecated +- neurodata_type_def: ClusterWaveforms + neurodata_type_inc: NWBDataInterface + default_name: ClusterWaveforms + doc: DEPRECATED The mean waveform shape, including standard deviation, of the different + clusters. Ideally, the waveform analysis should be performed on data that is only + high-pass filtered. This is a separate module because it is expected to require + updating. For example, IMEC probes may require different storage requirements + to store/display mean waveforms, requiring a new interface or an extension of + this one. + datasets: + - name: waveform_filtering + dtype: text + doc: Filtering applied to data before generating mean/sd + - name: waveform_mean + dtype: float32 + dims: + - num_clusters + - num_samples + shape: + - null + - null + doc: The mean waveform for each cluster, using the same indices for each wave + as cluster numbers in the associated Clustering module (i.e, cluster 3 is in + array slot [3]). Waveforms corresponding to gaps in cluster sequence should + be empty (e.g., zero- filled) + - name: waveform_sd + dtype: float32 + dims: + - num_clusters + - num_samples + shape: + - null + - null + doc: Stdev of waveforms for each cluster, using the same indices as in mean + links: + - name: clustering_interface + target_type: Clustering + doc: Link to Clustering interface that was the source of the clustered data + +- neurodata_type_def: Clustering + neurodata_type_inc: NWBDataInterface + default_name: Clustering + doc: DEPRECATED Clustered spike data, whether from automatic clustering tools (e.g., + klustakwik) or as a result of manual sorting. + datasets: + - name: description + dtype: text + doc: Description of clusters or clustering, (e.g. 
cluster 0 is noise, clusters + curated using Klusters, etc) + - name: num + dtype: int32 + dims: + - num_events + shape: + - null + doc: Cluster number of each event + - name: peak_over_rms + dtype: float32 + dims: + - num_clusters + shape: + - null + doc: Maximum ratio of waveform peak to RMS on any channel in the cluster (provides + a basic clustering metric). + - name: times + dtype: float64 + dims: + - num_events + shape: + - null + doc: Times of clustered events, in seconds. This may be a link to times field + in associated FeatureExtraction module. diff --git a/nwb-schema/2.10.0/core/nwb.epoch.yaml b/nwb-schema/2.10.0/core/nwb.epoch.yaml new file mode 100644 index 000000000..a4c06a906 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.epoch.yaml @@ -0,0 +1,31 @@ +groups: +- neurodata_type_def: TimeIntervals + neurodata_type_inc: DynamicTable + doc: A container for aggregating epoch data and the TimeSeries that each epoch applies + to. + datasets: + - name: start_time + neurodata_type_inc: VectorData + dtype: float32 + doc: Start time of epoch, in seconds. + - name: stop_time + neurodata_type_inc: VectorData + dtype: float32 + doc: Stop time of epoch, in seconds. + - name: tags + neurodata_type_inc: VectorData + dtype: text + doc: User-defined tags that identify or categorize events. + quantity: '?' + - name: tags_index + neurodata_type_inc: VectorIndex + doc: Index for tags. + quantity: '?' + - name: timeseries + neurodata_type_inc: TimeSeriesReferenceVectorData + doc: An index into a TimeSeries object. + quantity: '?' + - name: timeseries_index + neurodata_type_inc: VectorIndex + doc: Index for timeseries. + quantity: '?' 
diff --git a/nwb-schema/2.10.0/core/nwb.file.yaml b/nwb-schema/2.10.0/core/nwb.file.yaml new file mode 100644 index 000000000..2d74de372 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.file.yaml @@ -0,0 +1,478 @@ +groups: +- neurodata_type_def: NWBFile + neurodata_type_inc: NWBContainer + name: root + doc: An NWB file storing cellular-based neurophysiology data from a single + experimental session. + attributes: + - name: nwb_version + dtype: text + value: "2.10.0-alpha" + doc: File version string. Use semantic versioning, e.g. 1.2.1. This will be the + name of the format with trailing major, minor and patch numbers. + datasets: + - name: file_create_date + dtype: isodatetime + dims: + - num_modifications + shape: + - null + doc: 'A record of the date the file was created and of subsequent modifications. + The date is stored in UTC with local timezone offset as ISO 8601 + extended formatted strings: 2018-09-28T14:43:54.123+02:00. Dates stored in + UTC end in "Z" with no timezone offset. Date accuracy is up to milliseconds. + The file can be created after the experiment was run, so this may differ from + the experiment start time. Each modification to the nwb file adds a new entry + to the array.' + - name: identifier + dtype: text + doc: A unique text identifier for the file. For example, concatenated lab name, + file creation date/time and experimentalist, or a hash of these and/or other + values. The goal is that the string should be unique to all other files. + - name: session_description + dtype: text + doc: A description of the experimental session and data in the file. + - name: session_start_time + dtype: isodatetime + doc: 'Date and time of the experiment/session start. The date is stored + in UTC with local timezone offset as ISO 8601 extended formatted string: + 2018-09-28T14:43:54.123+02:00. + Dates stored in UTC end in "Z" with no timezone offset. Date accuracy is + up to milliseconds.' 
+ - name: timestamps_reference_time + dtype: isodatetime + doc: 'Date and time corresponding to time zero of all timestamps. The + date is stored in UTC with local timezone offset as ISO 8601 extended formatted + string: 2018-09-28T14:43:54.123+02:00. Dates stored in UTC end in "Z" with + no timezone offset. Date accuracy is up to milliseconds. All times stored + in the file use this time as reference (i.e., time zero).' + groups: + - name: acquisition + doc: Data streams recorded from the system, including ephys, ophys, tracking, + etc. This group should be read-only after the experiment is completed and + timestamps are corrected to a common timebase. The data stored here may be links + to raw data stored in external NWB files. This will allow keeping bulky raw + data out of the file while preserving the option of keeping some/all in the + file. Acquired data includes tracking and experimental data streams + (i.e., everything measured from the system). If bulky data is stored in the /acquisition + group, the data can exist in a separate NWB file that is linked to by the file + being used for processing and analysis. + groups: + - neurodata_type_inc: NWBDataInterface + doc: Acquired, raw data. + quantity: '*' + - neurodata_type_inc: DynamicTable + doc: Tabular data that is relevant to acquisition + quantity: '*' + - name: analysis + doc: Lab-specific and custom scientific analysis of data. There is no defined + format for the content of this group - the format is up to the individual user/lab. + To facilitate sharing analysis data between labs, the contents here + should be stored in standard types (e.g., neurodata_types) and appropriately documented. + The file can store lab-specific and custom data analysis without + restriction on its form or schema, reducing data formatting restrictions on + end users. Such data should be placed in the analysis group. The analysis data + should be documented so that it could be shared with other labs. 
+ groups: + - neurodata_type_inc: NWBContainer + doc: Custom analysis results. + quantity: '*' + - neurodata_type_inc: DynamicTable + doc: Tabular data that is relevant to data stored in analysis + quantity: '*' + - name: scratch + doc: 'A place to store one-off analysis results. Data placed here is not intended for + sharing. By placing data here, users acknowledge that there is no guarantee that + their data meets any standard.' + quantity: '?' + groups: + - neurodata_type_inc: NWBContainer + doc: Any one-off containers + quantity: '*' + - neurodata_type_inc: DynamicTable + doc: Any one-off tables + quantity: '*' + datasets: + - neurodata_type_inc: ScratchData + doc: Any one-off datasets + quantity: '*' + - name: processing + doc: "The home for ProcessingModules. These modules perform intermediate analysis\ + \ of data that is necessary to perform before scientific analysis. Examples\ + \ include spike clustering, extracting position from tracking data, stitching\ + \ together image slices. ProcessingModules can be large\ + \ and express many data sets from relatively complex analysis (e.g., spike detection\ + \ and clustering) or small, representing extraction of position information\ + \ from tracking video, or even binary lick/no-lick decisions. Common software\ + \ tools (e.g., klustakwik, MClust) are expected to read/write data here. \ + \ 'Processing' refers to intermediate analysis of the acquired data to make\ + \ it more amenable to scientific analysis." + groups: + - neurodata_type_inc: ProcessingModule + doc: Intermediate analysis of acquired data. + quantity: '*' + - name: stimulus + doc: 'Data pushed into the system (eg, video stimulus, sound, voltage, etc) and + secondary representations of that data (eg, measurements of something used as + a stimulus). This group should be made read-only after experiment complete and timestamps + are corrected to common timebase. 
Stores both presented stimuli and stimulus + templates, the latter in case the same stimulus is presented multiple times, + or is pulled from an external stimulus library. Stimuli are here + defined as any signal that is pushed into the system as part of the experiment + (eg, sound, video, voltage, etc). Many different experiments can use the same + stimuli, and stimuli can be reused during an experiment. The stimulus group + is organized so that one version of template stimuli can be stored and these + be used multiple times. These templates can exist in the present file or can + be linked to a remote library file.' + groups: + - name: presentation + doc: Stimuli presented during the experiment. + groups: + - neurodata_type_inc: TimeSeries + doc: TimeSeries objects containing data of presented stimuli. + quantity: '*' + - neurodata_type_inc: NWBDataInterface + doc: 'Generic NWB data interfaces, usually from an extension, + containing data of presented stimuli.' + quantity: '*' + - neurodata_type_inc: DynamicTable + doc: DynamicTable objects containing data of presented stimuli. + quantity: '*' + # even though TimeSeries is a child type of NWBDataInterface, we do not remove TimeSeries + # in order to maintain backwards compatibility in the APIs that + # use the neurodata_type_inc from the schema to set the variable name + - name: templates + doc: 'Template stimuli. Timestamps in templates are based on stimulus + design and are relative to the beginning of the stimulus. When templates are + used, the stimulus instances must convert presentation times to the experiment`s + time reference frame.' + groups: + - neurodata_type_inc: TimeSeries + doc: TimeSeries objects containing template data of presented stimuli. + quantity: '*' + - neurodata_type_inc: Images + doc: Images objects containing images of presented stimuli. + quantity: '*' + - name: general + doc: "Experimental metadata, including protocol, notes and description of hardware\ + \ device(s). 
The metadata stored in this section should be used to\ + \ describe the experiment. Metadata necessary for interpreting the data is stored\ + \ with the data. General experimental metadata, including animal\ + \ strain, experimental protocols, experimenter, devices, etc, are stored under\ + \ 'general'. Core metadata (e.g., that required to interpret data fields) is\ + \ stored with the data itself, and implicitly defined by the file specification\ + \ (e.g., time is in seconds). The strategy used here for storing non-core metadata\ + \ is to use free-form text fields, such as would appear in sentences or paragraphs\ + \ from a Methods section. Metadata fields are text to enable them to be more\ + \ general, for example to represent ranges instead of numerical values. Machine-readable\ + \ metadata is stored as attributes to these free-form datasets. All entries\ + \ in the below table are to be included when data is present. Unused groups\ + \ (e.g., intracellular_ephys in an optophysiology experiment) should not be\ + \ created unless there is data to store within them." + datasets: + - name: data_collection + dtype: text + doc: Notes about data collection and analysis. + quantity: '?' + - name: experiment_description + dtype: text + doc: General description of the experiment. + quantity: '?' + - name: experimenter + dtype: text + doc: Name of person(s) who performed the experiment. Can also specify roles + of different people involved. + quantity: '?' + dims: + - num_experimenters + shape: + - null + - name: institution + dtype: text + doc: Institution(s) where experiment was performed. + quantity: '?' + - name: keywords + dtype: text + dims: + - num_keywords + shape: + - null + doc: Terms to search over. + quantity: '?' + - name: lab + dtype: text + doc: Laboratory where experiment was performed. + quantity: '?' + - name: notes + dtype: text + doc: Notes about the experiment. + quantity: '?' 
+ - name: pharmacology + dtype: text + doc: Description of drugs used, including how and when they were administered. + Anesthesia(s), painkiller(s), etc., plus dosage, concentration, etc. + quantity: '?' + - name: protocol + dtype: text + doc: Experimental protocol, if applicable. e.g., include IACUC protocol number. + quantity: '?' + - name: related_publications + dtype: text + doc: Publication information. PMID, DOI, URL, etc. + dims: + - num_publications + shape: + - null + quantity: '?' + - name: session_id + dtype: text + doc: Lab-specific ID for the session. + quantity: '?' + - name: slices + dtype: text + doc: Description of slices, including information about preparation thickness, + orientation, temperature, and bath solution. + quantity: '?' + - name: source_script + dtype: text + doc: Script file or link to public source code used to create this NWB file. + quantity: '?' + attributes: + - name: file_name + dtype: text + doc: Name of script file. + - name: was_generated_by + dtype: text + doc: Name and version of software package(s) used to generate data contained in + this NWB File. For each software package or library, include the name of the + software as the first value and the version as the second value. + dims: + - num_sources + - name, version + shape: + - null + - 2 + quantity: '?' + - name: stimulus + dtype: text + doc: Notes about stimuli, such as how and where they were presented. + quantity: '?' + - name: surgery + dtype: text + doc: Narrative description about surgery/surgeries, including date(s) and who + performed surgery. + quantity: '?' + - name: virus + dtype: text + doc: Information about virus(es) used in experiments, including virus ID, source, + date made, injection location, volume, etc. + quantity: '?' + groups: + - name: external_resources + neurodata_type_inc: HERD + doc: This is the HERD structure for this specific NWBFile, storing the mapped external resources. + quantity: "?" 
+ - neurodata_type_inc: LabMetaData + doc: Place-holder than can be extended so that lab-specific meta-data can be + placed in /general. + quantity: '*' + - name: devices + doc: Description of hardware devices used during experiment, e.g., monitors, + ADC boards, microscopes, etc. + quantity: '?' + groups: + - neurodata_type_inc: Device + doc: Data acquisition devices. + quantity: '*' + - name: models + doc: Collection of data acquisition device models. + quantity: '?' + groups: + - neurodata_type_inc: DeviceModel + doc: Data acquisition device models. + quantity: '*' + - name: subject + neurodata_type_inc: Subject + doc: Information about the animal or person from which the data was measured. + quantity: '?' + - name: extracellular_ephys + doc: Metadata related to extracellular electrophysiology. + quantity: '?' + groups: + - neurodata_type_inc: ElectrodeGroup + doc: Physical group of electrodes. + quantity: '*' + - name: electrodes + neurodata_type_inc: ElectrodesTable + doc: A table of all electrodes (i.e. channels) used for recording. Changed in NWB 2.9.0 to use the newly added + ElectrodesTable neurodata type instead of a DynamicTable with added columns. + quantity: '?' + - name: intracellular_ephys + doc: Metadata related to intracellular electrophysiology. + quantity: '?' + datasets: + - name: filtering + dtype: text + doc: '[DEPRECATED] Use IntracellularElectrode.filtering instead. Description + of filtering used. Includes filtering type and parameters, frequency fall-off, + etc. If this changes between TimeSeries, filter description should be stored + as a text attribute for each TimeSeries.' + quantity: '?' + groups: + - neurodata_type_inc: IntracellularElectrode + doc: An intracellular electrode. + quantity: '*' + - name: sweep_table + neurodata_type_inc: SweepTable + doc: '[DEPRECATED] Table used to group different PatchClampSeries. SweepTable + is being replaced by IntracellularRecordingsTable and SimultaneousRecordingsTable + tables. 
Additional SequentialRecordingsTable, RepetitionsTable and + ExperimentalConditions tables provide enhanced support for experiment metadata.' + quantity: '?' + - name: intracellular_recordings + neurodata_type_inc: IntracellularRecordingsTable + doc: A table to group together a stimulus and response from a single electrode + and a single simultaneous recording. Each row in the table represents a + single recording consisting typically of a stimulus and a corresponding + response. In some cases, however, only a stimulus or a response is recorded + as part of an experiment. In this case, both the stimulus and response + will point to the same TimeSeries while the idx_start and count of the invalid + column will be set to -1, thus, indicating that no values have been recorded + for the stimulus or response, respectively. Note, a recording MUST contain + at least a stimulus or a response. Typically the stimulus and response are + PatchClampSeries. However, the use of AD/DA channels that are not associated + to an electrode is also common in intracellular electrophysiology, in which + case other TimeSeries may be used. + quantity: '?' + - name: simultaneous_recordings + neurodata_type_inc: SimultaneousRecordingsTable + doc: A table for grouping different intracellular recordings from the IntracellularRecordingsTable + table together that were recorded simultaneously from different electrodes. + quantity: '?' + - name: sequential_recordings + neurodata_type_inc: SequentialRecordingsTable + doc: A table for grouping different sequential recordings from the SimultaneousRecordingsTable + table together. This is typically used to group together sequential recordings + where a sequence of stimuli of the same type with varying parameters + has been presented in a sequence. + quantity: '?' + - name: repetitions + neurodata_type_inc: RepetitionsTable + doc: A table for grouping different sequential intracellular recordings together. 
+ With each SequentialRecording typically representing a particular type of + stimulus, the RepetitionsTable table is typically used to group sets of + stimuli applied in sequence. + quantity: '?' + - name: experimental_conditions + neurodata_type_inc: ExperimentalConditionsTable + doc: A table for grouping different intracellular recording repetitions together + that belong to the same experimental conditions. + quantity: '?' + - name: optogenetics + doc: Metadata describing optogenetic stimulation. + quantity: '?' + groups: + - neurodata_type_inc: OptogeneticStimulusSite + doc: An optogenetic stimulation site. + quantity: '*' + - name: optophysiology + doc: Metadata related to optophysiology. + quantity: '?' + groups: + - neurodata_type_inc: ImagingPlane + doc: An imaging plane. + quantity: '*' + - name: intervals + doc: Experimental intervals, whether that be logically distinct sub-experiments + having a particular scientific goal, trials (see trials subgroup) during an + experiment, or epochs (see epochs subgroup) deriving from analysis of data. + quantity: '?' + groups: + - name: epochs + neurodata_type_inc: TimeIntervals + doc: Divisions in time marking experimental stages or sub-divisions of a single + recording session. + quantity: '?' + - name: trials + neurodata_type_inc: TimeIntervals + doc: Repeated experimental events that have a logical grouping. + quantity: '?' + - name: invalid_times + neurodata_type_inc: TimeIntervals + doc: Time intervals that should be removed from analysis. + quantity: '?' + - neurodata_type_inc: TimeIntervals + doc: Optional additional table(s) for describing other experimental time intervals. + quantity: '*' + - name: units + neurodata_type_inc: Units + doc: Data about sorted spike units. + quantity: '?' + +- neurodata_type_def: LabMetaData + neurodata_type_inc: NWBContainer + doc: Lab-specific meta-data. 
+ +- neurodata_type_def: Subject + neurodata_type_inc: NWBContainer + doc: Information about the animal or person from which the data was measured. + datasets: + - name: age + dtype: text + doc: "Age of subject. Can be supplied instead of 'date_of_birth'. + The ISO 8601 Duration format is recommended, e.g., 'P90D' for 90 days old. + If the precise age is unknown, an age range can be given by '[lower bound]/[upper bound]' e.g. + 'P10D/P20D' would mean that the age is in between 10 and 20 days. If only the lower bound is known, + then including only the slash after that lower bound can be used to indicate a missing bound. + For instance, 'P90Y/' would indicate that the age is 90 years or older." + quantity: '?' + attributes: + - name: reference + doc: "Age is with reference to this event. Can be 'birth' or + 'gestational'. If reference is omitted, 'birth' is implied." + dtype: text + required: false + default_value: birth + - name: date_of_birth + dtype: isodatetime + doc: Date of birth of subject. Can be supplied instead of 'age'. + quantity: '?' + - name: description + dtype: text + doc: Description of subject and where subject came from (e.g., breeder, if + animal). + quantity: '?' + - name: genotype + dtype: text + doc: Genetic strain. If absent, assume Wild Type (WT). + quantity: '?' + - name: sex + dtype: text + doc: Gender of subject. + quantity: '?' + - name: species + dtype: text + doc: Species of subject. + quantity: '?' + - name: strain + dtype: text + doc: Strain of subject. + quantity: '?' + - name: subject_id + dtype: text + doc: ID of animal/person used/participating in experiment (lab convention). + quantity: '?' + - name: weight + dtype: text + doc: Weight at time of experiment, at time of surgery and at other important + times. + quantity: '?' 
+ +datasets: +- neurodata_type_def: ScratchData + neurodata_type_inc: NWBData + doc: Any one-off datasets + attributes: + - name: notes + doc: 'Any notes the user has about the dataset being stored' + dtype: text diff --git a/nwb-schema/2.10.0/core/nwb.icephys.yaml b/nwb-schema/2.10.0/core/nwb.icephys.yaml new file mode 100644 index 000000000..62c1e46b7 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.icephys.yaml @@ -0,0 +1,430 @@ +groups: +- neurodata_type_def: PatchClampSeries + neurodata_type_inc: TimeSeries + doc: An abstract base class for patch-clamp data - stimulus or response, + current or voltage. + attributes: + - name: stimulus_description + dtype: text + doc: Protocol/stimulus name for this patch-clamp dataset. + - name: sweep_number + dtype: uint32 + doc: Sweep number, allows to group different PatchClampSeries together. + required: false + datasets: + - name: data + dtype: numeric + dims: + - num_times + shape: + - null + doc: Recorded voltage or current. + attributes: + - name: unit + dtype: text + doc: Base unit of measurement for working with the data. Actual stored values are + not necessarily stored in these units. To access the data in these units, + multiply 'data' by 'conversion' and add 'offset'. + - name: gain + dtype: float32 + doc: Gain of the recording, in units Volt/Amp (v-clamp) or Volt/Volt (c-clamp). + quantity: '?' + links: + - name: electrode + target_type: IntracellularElectrode + doc: Link to IntracellularElectrode object that describes the electrode that was + used to apply or record this data. + +- neurodata_type_def: CurrentClampSeries + neurodata_type_inc: PatchClampSeries + doc: Voltage data from an intracellular current-clamp recording. A + corresponding CurrentClampStimulusSeries (stored separately as a stimulus) is + used to store the current injected. + datasets: + - name: data + doc: Recorded voltage. + attributes: + - name: unit + dtype: text + value: volts + doc: Base unit of measurement for working with the data. 
which is fixed to 'volts'. + Actual stored values are not necessarily stored in these units. To access the data in these units, + multiply 'data' by 'conversion' and add 'offset'. + - name: bias_current + dtype: float32 + doc: Bias current, in amps. + quantity: '?' + - name: bridge_balance + dtype: float32 + doc: Bridge balance, in ohms. + quantity: '?' + - name: capacitance_compensation + dtype: float32 + doc: Capacitance compensation, in farads. + quantity: '?' + +- neurodata_type_def: IZeroClampSeries + neurodata_type_inc: CurrentClampSeries + doc: Voltage data from an intracellular recording when all current + and amplifier settings are off (i.e., CurrentClampSeries fields will be zero). + There is no CurrentClampStimulusSeries associated with an IZero series because + the amplifier is disconnected and no stimulus can reach the cell. + attributes: + - name: stimulus_description + dtype: text + doc: An IZeroClampSeries has no stimulus, so this attribute is automatically set to "N/A" + value: N/A + datasets: + - name: bias_current + dtype: float32 + value: 0.0 + doc: Bias current, in amps, fixed to 0.0. + - name: bridge_balance + dtype: float32 + value: 0.0 + doc: Bridge balance, in ohms, fixed to 0.0. + - name: capacitance_compensation + dtype: float32 + value: 0.0 + doc: Capacitance compensation, in farads, fixed to 0.0. + +- neurodata_type_def: CurrentClampStimulusSeries + neurodata_type_inc: PatchClampSeries + doc: Stimulus current applied during current clamp recording. + datasets: + - name: data + doc: Stimulus current applied. + attributes: + - name: unit + dtype: text + value: amperes + doc: Base unit of measurement for working with the data. which is fixed to 'amperes'. + Actual stored values are not necessarily stored in these units. To access the data in these units, + multiply 'data' by 'conversion' and add 'offset'. 
+ +- neurodata_type_def: VoltageClampSeries + neurodata_type_inc: PatchClampSeries + doc: Current data from an intracellular voltage-clamp recording. A + corresponding VoltageClampStimulusSeries (stored separately as a stimulus) is + used to store the voltage injected. + datasets: + - name: data + doc: Recorded current. + attributes: + - name: unit + dtype: text + value: amperes + doc: Base unit of measurement for working with the data. which is fixed to 'amperes'. + Actual stored values are not necessarily stored in these units. To access the data in these units, + multiply 'data' by 'conversion' and add 'offset'. + - name: capacitance_fast + dtype: float32 + doc: Fast capacitance, in farads. + quantity: '?' + attributes: + - name: unit + dtype: text + value: farads + doc: Unit of measurement for capacitance_fast, which is fixed to 'farads'. + - name: capacitance_slow + dtype: float32 + doc: Slow capacitance, in farads. + quantity: '?' + attributes: + - name: unit + dtype: text + value: farads + doc: Unit of measurement for capacitance_fast, which is fixed to 'farads'. + - name: resistance_comp_bandwidth + dtype: float32 + doc: Resistance compensation bandwidth, in hertz. + quantity: '?' + attributes: + - name: unit + dtype: text + value: hertz + doc: Unit of measurement for resistance_comp_bandwidth, which is fixed to 'hertz'. + - name: resistance_comp_correction + dtype: float32 + doc: Resistance compensation correction, in percent. + quantity: '?' + attributes: + - name: unit + dtype: text + value: percent + doc: Unit of measurement for resistance_comp_correction, which is fixed to 'percent'. + - name: resistance_comp_prediction + dtype: float32 + doc: Resistance compensation prediction, in percent. + quantity: '?' + attributes: + - name: unit + dtype: text + value: percent + doc: Unit of measurement for resistance_comp_prediction, which is fixed to 'percent'. 
+ - name: whole_cell_capacitance_comp + dtype: float32 + doc: Whole cell capacitance compensation, in farads. + quantity: '?' + attributes: + - name: unit + dtype: text + value: farads + doc: Unit of measurement for whole_cell_capacitance_comp, which is fixed to 'farads'. + - name: whole_cell_series_resistance_comp + dtype: float32 + doc: Whole cell series resistance compensation, in ohms. + quantity: '?' + attributes: + - name: unit + dtype: text + value: ohms + doc: Unit of measurement for whole_cell_series_resistance_comp, which is fixed to 'ohms'. + +- neurodata_type_def: VoltageClampStimulusSeries + neurodata_type_inc: PatchClampSeries + doc: Stimulus voltage applied during a voltage clamp recording. + datasets: + - name: data + doc: Stimulus voltage applied. + attributes: + - name: unit + dtype: text + value: volts + doc: Base unit of measurement for working with the data. which is fixed to 'volts'. + Actual stored values are not necessarily stored in these units. To access the data in these units, + multiply 'data' by 'conversion' and add 'offset'. + +- neurodata_type_def: IntracellularElectrode + neurodata_type_inc: NWBContainer + doc: An intracellular electrode and its metadata. + datasets: + - name: cell_id + dtype: text + doc: unique ID of the cell + quantity: '?' + - name: description + dtype: text + doc: Description of electrode (e.g., whole-cell, sharp, etc.). + - name: filtering + dtype: text + doc: Electrode specific filtering. + quantity: '?' + - name: initial_access_resistance + dtype: text + doc: Initial access resistance. + quantity: '?' + - name: location + dtype: text + doc: Location of the electrode. Specify the area, layer, comments on estimation + of area/layer, stereotaxic coordinates if in vivo, etc. Use standard atlas + names for anatomical regions when possible. + quantity: '?' + - name: resistance + dtype: text + doc: Electrode resistance, in ohms. + quantity: '?' 
+ - name: seal + dtype: text + doc: Information about seal used for recording. + quantity: '?' + - name: slice + dtype: text + doc: Information about slice used for recording. + quantity: '?' + links: + - name: device + target_type: Device + doc: Device that was used to record from this electrode. + +- neurodata_type_def: SweepTable + neurodata_type_inc: DynamicTable + doc: '[DEPRECATED] Table used to group different PatchClampSeries. SweepTable + is being replaced by IntracellularRecordingsTable and SimultaneousRecordingsTable + tables. Additional SequentialRecordingsTable, RepetitionsTable, and + ExperimentalConditions tables provide enhanced support for experiment metadata.' + datasets: + - name: sweep_number + neurodata_type_inc: VectorData + dtype: uint32 + doc: Sweep number of the PatchClampSeries in that row. + - name: series + neurodata_type_inc: VectorData + dtype: + target_type: PatchClampSeries + reftype: object + doc: The PatchClampSeries with the sweep number in that row. + - name: series_index + neurodata_type_inc: VectorIndex + doc: Index for series. + +- neurodata_type_def: IntracellularElectrodesTable + neurodata_type_inc: DynamicTable + doc: Table for storing intracellular electrode related metadata. + attributes: + - name: description + dtype: text + value: Table for storing intracellular electrode related metadata. + doc: Description of what is in this dynamic table. + datasets: + - name: electrode + neurodata_type_inc: VectorData + dtype: + target_type: IntracellularElectrode + reftype: object + doc: Column for storing the reference to the intracellular electrode. + +- neurodata_type_def: IntracellularStimuliTable + neurodata_type_inc: DynamicTable + doc: Table for storing intracellular stimulus related metadata. + attributes: + - name: description + dtype: text + value: Table for storing intracellular stimulus related metadata. + doc: Description of what is in this dynamic table. 
+ datasets: + - name: stimulus + neurodata_type_inc: TimeSeriesReferenceVectorData + doc: Column storing the reference to the recorded stimulus for the recording (rows). + - name: stimulus_template + neurodata_type_inc: TimeSeriesReferenceVectorData + doc: Column storing the reference to the stimulus template for the recording (rows). + quantity: '?' + +- neurodata_type_def: IntracellularResponsesTable + neurodata_type_inc: DynamicTable + doc: Table for storing intracellular response related metadata. + attributes: + - name: description + dtype: text + value: Table for storing intracellular response related metadata. + doc: Description of what is in this dynamic table. + datasets: + - name: response + neurodata_type_inc: TimeSeriesReferenceVectorData + doc: Column storing the reference to the recorded response for the recording (rows) + +- neurodata_type_def: IntracellularRecordingsTable + neurodata_type_inc: AlignedDynamicTable + name: intracellular_recordings + doc: A table to group together a stimulus and response from a single electrode and + a single simultaneous recording. Each row in the table represents a single recording + consisting typically of a stimulus and a corresponding response. In some cases, + however, only a stimulus or a response is recorded as part of an experiment. + In this case, both the stimulus and response will point to the same TimeSeries + while the idx_start and count of the invalid column will be set to -1, thus, indicating + that no values have been recorded for the stimulus or response, respectively. + Note, a recording MUST contain at least a stimulus or a response. Typically the + stimulus and response are PatchClampSeries. However, the use of AD/DA channels + that are not associated to an electrode is also common in intracellular electrophysiology, + in which case other TimeSeries may be used. 
+ attributes: + - name: description + dtype: text + value: A table to group together a stimulus and response from a single electrode + and a single simultaneous recording and for storing metadata about the intracellular + recording. + doc: Description of the contents of this table. Inherited from AlignedDynamicTable + and overwritten here to fix the value of the attribute. + groups: + - name: electrodes + neurodata_type_inc: IntracellularElectrodesTable + doc: Table for storing intracellular electrode related metadata. + - name: stimuli + neurodata_type_inc: IntracellularStimuliTable + doc: Table for storing intracellular stimulus related metadata. + - name: responses + neurodata_type_inc: IntracellularResponsesTable + doc: Table for storing intracellular response related metadata. + +- neurodata_type_def: SimultaneousRecordingsTable + neurodata_type_inc: DynamicTable + name: simultaneous_recordings + doc: A table for grouping different intracellular recordings from the IntracellularRecordingsTable + table together that were recorded simultaneously from different electrodes. + datasets: + - name: recordings + neurodata_type_inc: DynamicTableRegion + doc: A reference to one or more rows in the IntracellularRecordingsTable table. + attributes: + - name: table + dtype: + target_type: IntracellularRecordingsTable + reftype: object + doc: Reference to the IntracellularRecordingsTable table that this table region + applies to. This specializes the attribute inherited from DynamicTableRegion + to fix the type of table that can be referenced here. + - name: recordings_index + neurodata_type_inc: VectorIndex + doc: Index dataset for the recordings column. + +- neurodata_type_def: SequentialRecordingsTable + neurodata_type_inc: DynamicTable + name: sequential_recordings + doc: A table for grouping different sequential recordings from the SimultaneousRecordingsTable + table together. 
This is typically used to group together sequential recordings + where a sequence of stimuli of the same type with varying parameters have + been presented in a sequence. + datasets: + - name: simultaneous_recordings + neurodata_type_inc: DynamicTableRegion + doc: A reference to one or more rows in the SimultaneousRecordingsTable table. + attributes: + - name: table + dtype: + target_type: SimultaneousRecordingsTable + reftype: object + doc: Reference to the SimultaneousRecordingsTable table that this table region + applies to. This specializes the attribute inherited from DynamicTableRegion + to fix the type of table that can be referenced here. + - name: simultaneous_recordings_index + neurodata_type_inc: VectorIndex + doc: Index dataset for the simultaneous_recordings column. + - name: stimulus_type + neurodata_type_inc: VectorData + dtype: text + doc: The type of stimulus used for the sequential recording. + +- neurodata_type_def: RepetitionsTable + neurodata_type_inc: DynamicTable + name: repetitions + doc: A table for grouping different sequential intracellular recordings together. + With each SequentialRecording typically representing a particular type of stimulus, + the RepetitionsTable table is typically used to group sets of stimuli applied + in sequence. + datasets: + - name: sequential_recordings + neurodata_type_inc: DynamicTableRegion + doc: A reference to one or more rows in the SequentialRecordingsTable table. + attributes: + - name: table + dtype: + target_type: SequentialRecordingsTable + reftype: object + doc: Reference to the SequentialRecordingsTable table that this table region + applies to. This specializes the attribute inherited from DynamicTableRegion + to fix the type of table that can be referenced here. + - name: sequential_recordings_index + neurodata_type_inc: VectorIndex + doc: Index dataset for the sequential_recordings column. 
+ +- neurodata_type_def: ExperimentalConditionsTable + neurodata_type_inc: DynamicTable + name: experimental_conditions + doc: A table for grouping different intracellular recording repetitions together + that belong to the same experimental condition. + datasets: + - name: repetitions + neurodata_type_inc: DynamicTableRegion + doc: A reference to one or more rows in the RepetitionsTable table. + attributes: + - name: table + dtype: + target_type: RepetitionsTable + reftype: object + doc: Reference to the RepetitionsTable table that this table region applies + to. This specializes the attribute inherited from DynamicTableRegion to fix + the type of table that can be referenced here. + - name: repetitions_index + neurodata_type_inc: VectorIndex + doc: Index dataset for the repetitions column. diff --git a/nwb-schema/2.10.0/core/nwb.image.yaml b/nwb-schema/2.10.0/core/nwb.image.yaml new file mode 100644 index 000000000..7fecfa125 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.image.yaml @@ -0,0 +1,229 @@ +datasets: +- neurodata_type_def: GrayscaleImage + neurodata_type_inc: Image + dims: + - x + - y + shape: + - null + - null + doc: A grayscale image. + dtype: numeric + +- neurodata_type_def: RGBImage + neurodata_type_inc: Image + dims: + - x + - y + - r, g, b + shape: + - null + - null + - 3 + doc: A color image. + dtype: numeric + +- neurodata_type_def: RGBAImage + neurodata_type_inc: Image + dims: + - x + - y + - r, g, b, a + shape: + - null + - null + - 4 + doc: A color image with transparency. + dtype: numeric + +groups: +- neurodata_type_def: ImageSeries + neurodata_type_inc: TimeSeries + doc: General image data that is common between acquisition and stimulus time series. + Sometimes the image data is stored in the file in a raw format while other + times it will be stored as a series of external image files in the host file system. 
+ The data field will either be binary data, if the data is stored in the NWB file, or + empty, if the data is stored in an external image stack. [frame][x][y] or [frame][x][y][z]. + datasets: + - name: data + dtype: numeric + dims: + - - frame + - x + - y + - - frame + - x + - y + - z + shape: + - - null + - null + - null + - - null + - null + - null + - null + doc: Binary data representing images across frames. If data are stored in an external + file, this should be an empty 3D array. + - name: dimension + dtype: int32 + dims: + - rank + shape: + - null + doc: Number of pixels on x, y, (and z) axes. + quantity: '?' + - name: external_file + dtype: text + dims: + - num_files + shape: + - null + doc: Paths to one or more external file(s). The field is only present if format='external'. + This is only relevant if the image series is stored in the file system as one + or more image file(s). This field should NOT be used if the image is stored + in another NWB file and that file is linked to this file. + quantity: '?' + attributes: + - name: starting_frame + dtype: int32 + dims: + - num_files + shape: + - null + doc: Each external image may contain one or more consecutive frames of the full + ImageSeries. This attribute serves as an index to indicate which frames each file + contains, to facilitate random access. The 'starting_frame' attribute, hence, + contains a list of frame numbers within the full ImageSeries of the first frame + of each file listed in the parent 'external_file' dataset. Zero-based indexing is + used (hence, the first element will always be zero). For example, if the + 'external_file' dataset has three paths to files and the first file has 5 frames, + the second file has 10 frames, and the third file has 20 frames, then this + attribute will have values [0, 5, 15]. 
If there is a single external file that + holds all of the frames of the ImageSeries (and so there is a single element in + the 'external_file' dataset), then this attribute should have value [0]. + - name: format + dtype: text + default_value: raw + doc: Format of image. If this is 'external', then the attribute 'external_file' + contains the path information to the image files. If this is 'raw', then the raw + (single-channel) binary data is stored in the 'data' dataset. If this attribute + is not present, then the default format='raw' case is assumed. + quantity: '?' + links: + - name: device + target_type: Device + doc: Link to the Device object that was used to capture these images. + quantity: '?' + +- neurodata_type_def: ImageMaskSeries + neurodata_type_inc: ImageSeries + doc: DEPRECATED. An alpha mask that is applied to a presented visual stimulus. The 'data' array + contains an array of mask values that are applied to the displayed image. Mask + values are stored as RGBA. Mask can vary with time. The timestamps array indicates + the starting time of a mask, and that mask pattern continues until it's explicitly + changed. + links: + - name: masked_imageseries + target_type: ImageSeries + doc: Link to ImageSeries object that this image mask is applied to. + +- neurodata_type_def: OpticalSeries + neurodata_type_inc: ImageSeries + doc: Image data that is presented or recorded. A stimulus template movie will be + stored only as an image. When the image is presented as stimulus, additional data + is required, such as field of view (e.g., how much of the visual field the image + covers, or what is the area of the target being imaged). If the OpticalSeries + represents acquired imaging data, orientation is also important. + datasets: + - name: distance + dtype: float32 + doc: Distance from camera/monitor to target/eye. + quantity: '?' 
+ - name: field_of_view + dtype: float32 + dims: + - - width, height + - - width, height, depth + shape: + - - 2 + - - 3 + doc: Width, height and depth of image, or imaged area, in meters. + quantity: '?' + - name: data + dtype: numeric + dims: + - - frame + - x + - y + - - frame + - x + - y + - r, g, b + shape: + - - null + - null + - null + - - null + - null + - null + - 3 + doc: Images presented to subject, either grayscale or RGB + - name: orientation + dtype: text + doc: Description of image relative to some reference frame (e.g., which way is + up). Must also specify frame of reference. + quantity: '?' + +- neurodata_type_def: IndexSeries + neurodata_type_inc: TimeSeries + doc: Stores indices that reference images defined in other containers. The primary purpose + of the IndexSeries is to allow images stored in an Images container to be referenced in a + specific sequence through the 'indexed_images' link. This approach avoids duplicating image data + when the same image needs to be presented multiple times or when images need to be shown in a + different order than they are stored. Since images in an Images container do not have an inherent + order, the Images container needs to include an 'order_of_images' dataset (of type + ImageReferences) when being referenced by an IndexSeries. This dataset establishes the ordered + sequence that the indices in IndexSeries refer to. The 'data' field stores the index into this + ordered sequence, and the 'timestamps' array indicates the precise presentation time of each + indexed image during an experiment. This can be used for displaying individual images or creating + movie segments by referencing a sequence of images with the appropriate timestamps. While + IndexSeries can also reference frames from an ImageSeries through the 'indexed_timeseries' link, + this usage is discouraged and will be deprecated in favor of using Images containers with + 'order_of_images'. 
+ + datasets: + - name: data + dtype: uint32 + dims: + - num_times + shape: + - null + doc: Index of the image (using zero-indexing) in the linked Images object. + attributes: + - name: conversion + dtype: float32 + doc: This field is unused by IndexSeries. + required: false + - name: resolution + dtype: float32 + doc: This field is unused by IndexSeries. + required: false + - name: offset + dtype: float32 + doc: This field is unused by IndexSeries. + required: false + - name: unit + dtype: text + value: N/A + doc: This field is unused by IndexSeries and has the value N/A. + links: + - name: indexed_timeseries + target_type: ImageSeries + doc: Link to ImageSeries object containing images that are indexed. Use of this link + is discouraged and will be deprecated. Link to an Images type instead. + quantity: '?' + - name: indexed_images + target_type: Images + doc: Link to Images object containing an ordered set of images that are indexed. The Images object + must contain an 'order_of_images' dataset specifying the order of the images in the Images type. + quantity: '?' diff --git a/nwb-schema/2.10.0/core/nwb.misc.yaml b/nwb-schema/2.10.0/core/nwb.misc.yaml new file mode 100644 index 000000000..2d01fa82f --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.misc.yaml @@ -0,0 +1,336 @@ +groups: +- neurodata_type_def: AbstractFeatureSeries + neurodata_type_inc: TimeSeries + doc: Abstract features, such as quantitative descriptions of sensory stimuli. The + TimeSeries::data field is a 2D array, storing those features (e.g., for visual + grating stimulus this might be orientation, spatial frequency and contrast). Null + stimuli (eg, uniform gray) can be marked as being an independent feature (eg, + 1.0 for gray, 0.0 for actual stimulus) or by storing NaNs for feature values, + or through use of the TimeSeries::control fields. A set of features is considered + to persist until the next set of features is defined. The final set of features + stored should be the null set. 
This is useful when storing the raw stimulus + is impractical. + datasets: + - name: data + dtype: numeric + dims: + - - num_times + - - num_times + - num_features + shape: + - - null + - - null + - null + doc: Values of each feature at each time. + attributes: + - name: unit + dtype: text + default_value: see 'feature_units' + doc: Since there can be different units for different features, store the units + in 'feature_units'. The default value for this attribute is "see 'feature_units'". + required: false + - name: feature_units + dtype: text + dims: + - num_features + shape: + - null + doc: Units of each feature. + quantity: '?' + - name: features + dtype: text + dims: + - num_features + shape: + - null + doc: Description of the features represented in TimeSeries::data. + +- neurodata_type_def: AnnotationSeries + neurodata_type_inc: TimeSeries + doc: Stores user annotations made during an experiment. The data[] + field stores a text array, and timestamps are stored for each annotation (ie, + interval=1). This is largely an alias to a standard TimeSeries storing a text + array but that is identifiable as storing annotations in a machine-readable way. + datasets: + - name: data + dtype: text + dims: + - num_times + shape: + - null + doc: Annotations made during an experiment. + attributes: + - name: resolution + dtype: float32 + value: -1.0 + doc: Smallest meaningful difference between values in data. Annotations have + no units, so the value is fixed to -1.0. + - name: unit + dtype: text + value: n/a + doc: Base unit of measurement for working with the data. Annotations have + no units, so the value is fixed to 'n/a'. + +- neurodata_type_def: IntervalSeries + neurodata_type_inc: TimeSeries + doc: Stores intervals of data. The timestamps field stores the beginning and end + of intervals. The data field stores whether the interval just started (>0 value) + or ended (<0 value). 
Different interval types can be represented in the same series + by using multiple key values (eg, 1 for feature A, 2 for feature B, 3 for feature + C, etc). The field data stores an 8-bit integer. This is largely an alias of a + standard TimeSeries but that is identifiable as representing time intervals in + a machine-readable way. + datasets: + - name: data + dtype: int8 + dims: + - num_times + shape: + - null + doc: Use values >0 if interval started, <0 if interval ended. + attributes: + - name: resolution + dtype: float32 + value: -1.0 + doc: Smallest meaningful difference between values in data. Annotations have + no units, so the value is fixed to -1.0. + - name: unit + dtype: text + value: n/a + doc: Base unit of measurement for working with the data. Annotations have + no units, so the value is fixed to 'n/a'. + +- neurodata_type_def: FrequencyBandsTable + neurodata_type_inc: DynamicTable + doc: Table for describing the bands that DecompositionSeries was generated from. There + should be one row in this table for each band. + datasets: + - name: band_name + neurodata_type_inc: VectorData + dtype: text + doc: Name of the band, e.g. theta. + - name: band_limits + neurodata_type_inc: VectorData + dtype: float32 + dims: + - num_bands + - low, high + shape: + - null + - 2 + doc: Low and high limit of each band in Hz. If it is a Gaussian filter, use + 2 SD on either side of the center. + - name: band_mean + neurodata_type_inc: VectorData + dtype: float32 + dims: + - num_bands + shape: + - null + doc: The mean Gaussian filters, in Hz. + quantity: '?' + - name: band_stdev + neurodata_type_inc: VectorData + dtype: float32 + dims: + - num_bands + shape: + - null + doc: The standard deviation of Gaussian filters, in Hz. + quantity: '?' + +- neurodata_type_def: DecompositionSeries + neurodata_type_inc: TimeSeries + doc: Spectral analysis of a time series, e.g. of an LFP or a speech signal. 
+ datasets: + - name: data + dtype: numeric + dims: + - num_times + - num_channels + - num_bands + shape: + - null + - null + - null + doc: Data decomposed into frequency bands. + attributes: + - name: unit + dtype: text + default_value: no unit + doc: Base unit of measurement for working with the data. Actual stored values are + not necessarily stored in these units. To access the data in these units, + multiply 'data' by 'conversion'. + - name: metric + dtype: text + doc: The metric used, e.g. phase, amplitude, power. + - name: source_channels + neurodata_type_inc: DynamicTableRegion + doc: DynamicTableRegion pointer to the channels that this decomposition series was generated from. + quantity: '?' + groups: + - name: bands + neurodata_type_inc: FrequencyBandsTable + doc: Table for describing the bands that this series was generated from. + quantity: '?' + links: + - name: source_timeseries + target_type: TimeSeries + doc: Link to TimeSeries object that this data was calculated from. Metadata about + electrodes and their position can be read from that ElectricalSeries so it is + not necessary to store that information here. + quantity: '?' + +- neurodata_type_def: Units + neurodata_type_inc: DynamicTable + default_name: Units + doc: Data about spiking units. Event times of observed units (e.g. cell, synapse, + etc.) should be concatenated and stored in spike_times. + datasets: + - name: spike_times_index + neurodata_type_inc: VectorIndex + doc: Index into the spike_times dataset. + quantity: '?' + - name: spike_times + neurodata_type_inc: VectorData + dtype: float64 + doc: Spike times for each unit in seconds. + quantity: '?' + attributes: + - name: resolution + dtype: float64 + doc: The smallest possible difference between two spike times. 
Usually 1 divided by the acquisition sampling rate + from which spike times were extracted, but could be larger if the acquisition time series was downsampled or + smaller if the acquisition time series was smoothed/interpolated and it is possible for the spike time to be + between samples. + required: false + - name: obs_intervals_index + neurodata_type_inc: VectorIndex + doc: Index into the obs_intervals dataset. + quantity: '?' + - name: obs_intervals + neurodata_type_inc: VectorData + dtype: float64 + dims: + - num_intervals + - start|end + shape: + - null + - 2 + doc: Observation intervals for each unit. + quantity: '?' + - name: electrodes_index + neurodata_type_inc: VectorIndex + doc: Index into electrodes. + quantity: '?' + - name: electrodes + neurodata_type_inc: DynamicTableRegion + doc: Electrode that each spike unit came from, specified using a DynamicTableRegion. + quantity: '?' + - name: electrode_group + neurodata_type_inc: VectorData + dtype: + target_type: ElectrodeGroup + reftype: object + doc: Electrode group that each spike unit came from. + quantity: '?' + - name: waveform_mean + neurodata_type_inc: VectorData + dtype: float32 + dims: + - - num_units + - num_samples + - - num_units + - num_samples + - num_electrodes + shape: + - - null + - null + - - null + - null + - null + doc: Spike waveform mean for each spike unit. + quantity: '?' + attributes: + - name: sampling_rate + dtype: float32 + doc: Sampling rate, in hertz. + required: false + - name: unit + dtype: text + value: volts + doc: Unit of measurement. This value is fixed to 'volts'. + required: false + - name: waveform_sd + neurodata_type_inc: VectorData + dtype: float32 + dims: + - - num_units + - num_samples + - - num_units + - num_samples + - num_electrodes + shape: + - - null + - null + - - null + - null + - null + doc: Spike waveform standard deviation for each spike unit. + quantity: '?' + attributes: + - name: sampling_rate + dtype: float32 + doc: Sampling rate, in hertz. 
+ required: false + - name: unit + dtype: text + value: volts + doc: Unit of measurement. This value is fixed to 'volts'. + required: false + - name: waveforms + neurodata_type_inc: VectorData + dtype: numeric + dims: + - num_waveforms + - num_samples + shape: + - null + - null + doc: "Individual waveforms for each spike on each electrode. This is a doubly indexed column. The 'waveforms_index' + column indexes which waveforms in this column belong to the same spike event for a given unit, where each waveform + was recorded from a different electrode. The 'waveforms_index_index' column indexes the 'waveforms_index' column + to indicate which spike events belong to a given unit. For example, if the + 'waveforms_index_index' column has values [2, 5, 6], then the first 2 elements of the 'waveforms_index' column + correspond to the 2 spike events of the first unit, the next 3 elements of the 'waveforms_index' column correspond + to the 3 spike events of the second unit, and the next 1 element of the 'waveforms_index' column corresponds to + the 1 spike event of the third unit. If the 'waveforms_index' column has values [3, 6, 8, 10, 12, 13], then + the first 3 elements of the 'waveforms' column contain the 3 spike waveforms that were recorded from 3 different + electrodes for the first spike time of the first unit. See + https://nwb-schema.readthedocs.io/en/stable/format_description.html#doubly-ragged-arrays for a graphical + representation of this example. When there is only one electrode for each unit (i.e., each spike time is + associated with a single waveform), then the 'waveforms_index' column will have values 1, 2, ..., N, where N is + the number of spike events. The number of electrodes for each spike event should be the same within a given unit. 
+ The 'electrodes' column should be used to indicate which electrodes are associated with each unit, and the order + of the waveforms within a given unit x spike event should be the same as the order of the electrodes referenced in + the 'electrodes' column of this table. The number of samples for each waveform must be the same." + quantity: '?' + attributes: + - name: sampling_rate + dtype: float32 + doc: Sampling rate, in hertz. + required: false + - name: unit + dtype: text + value: volts + doc: Unit of measurement. This value is fixed to 'volts'. + required: false + - name: waveforms_index + neurodata_type_inc: VectorIndex + doc: Index into the 'waveforms' dataset. One value for every spike event. See 'waveforms' for more detail. + quantity: '?' + - name: waveforms_index_index + neurodata_type_inc: VectorIndex + doc: Index into the 'waveforms_index' dataset. One value for every unit (row in the table). See 'waveforms' for more + detail. + quantity: '?' diff --git a/nwb-schema/2.10.0/core/nwb.namespace.yaml b/nwb-schema/2.10.0/core/nwb.namespace.yaml new file mode 100644 index 000000000..1c02691e0 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.namespace.yaml @@ -0,0 +1,60 @@ +namespaces: +- name: core + doc: NWB namespace + author: + - Andrew Tritt + - Oliver Ruebel + - Ryan Ly + - Ben Dichter + - Keith Godfrey + - Jeff Teeters + contact: + - ajtritt@lbl.gov + - oruebel@lbl.gov + - rly@lbl.gov + - bdichter@lbl.gov + - keithg@alleninstitute.org + - jteeters@berkeley.edu + full_name: NWB core + schema: + - namespace: hdmf-common + - doc: This source module contains base data types used throughout the NWB data + format. + source: nwb.base.yaml + title: Base data types + - doc: This source module contains neurodata_types for device data. + source: nwb.device.yaml + title: Devices + - doc: This source module contains neurodata_types for epoch data. + source: nwb.epoch.yaml + title: Epochs + - doc: This source module contains neurodata_types for image data. 
+ source: nwb.image.yaml + title: Image data + - doc: Main NWB file specification. + source: nwb.file.yaml + title: NWB file + - doc: Miscellaneous types. + source: nwb.misc.yaml + title: Miscellaneous neurodata_types. + - doc: This source module contains neurodata_types for behavior data. + source: nwb.behavior.yaml + title: Behavior + - doc: This source module contains neurodata_types for extracellular electrophysiology + data. + source: nwb.ecephys.yaml + title: Extracellular electrophysiology + - doc: This source module contains neurodata_types for intracellular electrophysiology + data. + source: nwb.icephys.yaml + title: Intracellular electrophysiology + - doc: This source module contains neurodata_types for opto-genetics data. + source: nwb.ogen.yaml + title: Optogenetics + - doc: This source module contains neurodata_types for optical physiology data. + source: nwb.ophys.yaml + title: Optical physiology + - doc: This source module contains neurodata_type for retinotopy data. + source: nwb.retinotopy.yaml + title: Retinotopy + version: "2.10.0-alpha" diff --git a/nwb-schema/2.10.0/core/nwb.ogen.yaml b/nwb-schema/2.10.0/core/nwb.ogen.yaml new file mode 100644 index 000000000..419d6190e --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.ogen.yaml @@ -0,0 +1,48 @@ +groups: +- neurodata_type_def: OptogeneticSeries + neurodata_type_inc: TimeSeries + doc: An optogenetic stimulus. + datasets: + - name: data + dtype: numeric + dims: + - - num_times + - - num_times + - num_rois + shape: + - - null + - - null + - null + doc: Applied power for optogenetic stimulus, in watts. Shape can be 1D or 2D. + 2D data is meant to be used in an extension of OptogeneticSeries that + defines what the second dimension represents. + attributes: + - name: unit + dtype: text + value: watts + doc: Unit of measurement for data, which is fixed to 'watts'. 
+ links: + - name: site + target_type: OptogeneticStimulusSite + doc: Link to OptogeneticStimulusSite object that describes the site to which this + stimulus was applied. + +- neurodata_type_def: OptogeneticStimulusSite + neurodata_type_inc: NWBContainer + doc: A site of optogenetic stimulation. + datasets: + - name: description + dtype: text + doc: Description of stimulation site. + - name: excitation_lambda + dtype: float32 + doc: Excitation wavelength, in nm. + - name: location + dtype: text + doc: Location of the stimulation site. Specify the area, layer, comments on estimation + of area/layer, stereotaxic coordinates if in vivo, etc. Use standard atlas + names for anatomical regions when possible. + links: + - name: device + target_type: Device + doc: Device that generated the stimulus. diff --git a/nwb-schema/2.10.0/core/nwb.ophys.yaml b/nwb-schema/2.10.0/core/nwb.ophys.yaml new file mode 100644 index 000000000..65dc12c3b --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.ophys.yaml @@ -0,0 +1,364 @@ +groups: +- neurodata_type_def: OnePhotonSeries + neurodata_type_inc: ImageSeries + doc: Image stack recorded over time from 1-photon microscope. + attributes: + - name: pmt_gain + dtype: float32 + doc: Photomultiplier gain. + required: false + - name: scan_line_rate + dtype: float32 + doc: Lines imaged per second. This is also stored in /general/optophysiology but + is kept here as it is useful information for analysis, and so good to be stored + w/ the actual data. + required: false + - name: exposure_time + dtype: float32 + doc: Exposure time of the sample; often the inverse of the frequency. + required: false + - name: binning + dtype: uint8 + doc: Amount of pixels combined into 'bins'; could be 1, 2, 4, 8, etc. + required: false + - name: power + dtype: float32 + doc: Power of the excitation in mW, if known. + required: false + - name: intensity + dtype: float32 + doc: Intensity of the excitation in mW/mm^2, if known. 
+ required: false + links: + - name: imaging_plane + target_type: ImagingPlane + doc: Link to ImagingPlane object from which this TimeSeries data was generated. + +- neurodata_type_def: TwoPhotonSeries + neurodata_type_inc: ImageSeries + doc: Image stack recorded over time from 2-photon microscope. + attributes: + - name: pmt_gain + dtype: float32 + doc: Photomultiplier gain. + required: false + - name: scan_line_rate + dtype: float32 + doc: Lines imaged per second. This is also stored in /general/optophysiology but + is kept here as it is useful information for analysis, and so good to be stored + w/ the actual data. + required: false + datasets: + - name: field_of_view + dtype: float32 + dims: + - - width|height + - - width|height|depth + shape: + - - 2 + - - 3 + doc: Width, height and depth of image, or imaged area, in meters. + quantity: '?' + links: + - name: imaging_plane + target_type: ImagingPlane + doc: Link to ImagingPlane object from which this TimeSeries data was generated. + +- neurodata_type_def: RoiResponseSeries + neurodata_type_inc: TimeSeries + doc: ROI responses over an imaging plane. The first dimension represents time. + The second dimension, if present, represents ROIs. + datasets: + - name: data + dtype: numeric + dims: + - - num_times + - - num_times + - num_ROIs + shape: + - - null + - - null + - null + doc: Signals from ROIs. + - name: rois + neurodata_type_inc: DynamicTableRegion + doc: DynamicTableRegion referencing into an ROITable containing information on the ROIs + stored in this timeseries. + +- neurodata_type_def: DfOverF + neurodata_type_inc: NWBDataInterface + default_name: DfOverF + doc: dF/F information about a region of interest (ROI). Storage hierarchy of dF/F + should be the same as for segmentation (i.e., same names for ROIs and for image + planes). + groups: + - neurodata_type_inc: RoiResponseSeries + doc: RoiResponseSeries object(s) containing dF/F for a ROI. 
+ quantity: '+' + +- neurodata_type_def: Fluorescence + neurodata_type_inc: NWBDataInterface + default_name: Fluorescence + doc: Fluorescence information about a region of interest (ROI). Storage hierarchy + of fluorescence should be the same as for segmentation (ie, same names for ROIs + and for image planes). + groups: + - neurodata_type_inc: RoiResponseSeries + doc: RoiResponseSeries object(s) containing fluorescence data for a ROI. + quantity: '+' + +- neurodata_type_def: ImageSegmentation + neurodata_type_inc: NWBDataInterface + default_name: ImageSegmentation + doc: Stores pixels in an image that represent different regions of interest (ROIs) + or masks. All segmentation for a given imaging plane is stored together, with + storage for multiple imaging planes (masks) supported. Each ROI is stored in its + own subgroup, with the ROI group containing both a 2D mask and a list of pixels + that make up this mask. Segments can also be used for masking neuropil. If segmentation + is allowed to change with time, a new imaging plane (or module) is required and + ROI names should remain consistent between them. + groups: + - neurodata_type_inc: PlaneSegmentation + doc: Results from image segmentation of a specific imaging plane. + quantity: '+' + +- neurodata_type_def: PlaneSegmentation + neurodata_type_inc: DynamicTable + doc: Results from image segmentation of a specific imaging plane. + At least one of `image_mask`, `pixel_mask`, or `voxel_mask` is required. + datasets: + - name: image_mask + neurodata_type_inc: VectorData + dims: + - - num_roi + - num_x + - num_y + - - num_roi + - num_x + - num_y + - num_z + shape: + - - null + - null + - null + - - null + - null + - null + - null + doc: ROI masks for each ROI. Each image mask is the size of the original imaging + plane (or volume) and members of the ROI are finite non-zero. + At least one of `image_mask`, `pixel_mask`, or `voxel_mask` is required. + quantity: '?' 
+ - name: pixel_mask_index + neurodata_type_inc: VectorIndex + doc: Index into pixel_mask. + quantity: '?' + - name: pixel_mask + neurodata_type_inc: VectorData + dtype: + - name: x + dtype: uint32 + doc: Pixel x-coordinate. + - name: y + dtype: uint32 + doc: Pixel y-coordinate. + - name: weight + dtype: float32 + doc: Weight of the pixel. + doc: 'Pixel masks for each ROI: a list of indices and weights for the ROI. Pixel + masks are concatenated and parsing of this dataset is maintained by the PlaneSegmentation. + At least one of `image_mask`, `pixel_mask`, or `voxel_mask` is required.' + quantity: '?' + - name: voxel_mask_index + neurodata_type_inc: VectorIndex + doc: Index into voxel_mask. + quantity: '?' + - name: voxel_mask + neurodata_type_inc: VectorData + dtype: + - name: x + dtype: uint32 + doc: Voxel x-coordinate. + - name: y + dtype: uint32 + doc: Voxel y-coordinate. + - name: z + dtype: uint32 + doc: Voxel z-coordinate. + - name: weight + dtype: float32 + doc: Weight of the voxel. + doc: 'Voxel masks for each ROI: a list of indices and weights for the ROI. Voxel + masks are concatenated and parsing of this dataset is maintained by the PlaneSegmentation. + At least one of `image_mask`, `pixel_mask`, or `voxel_mask` is required.' + quantity: '?' + groups: + - name: reference_images + doc: Image stacks that the segmentation masks apply to. + groups: + - neurodata_type_inc: ImageSeries + doc: One or more image stacks that the masks apply to (can be one-element + stack). + quantity: '*' + links: + - name: imaging_plane + target_type: ImagingPlane + doc: Link to ImagingPlane object from which this data was generated. + +- neurodata_type_def: ImagingPlane + neurodata_type_inc: NWBContainer + doc: An imaging plane and its metadata. + datasets: + - name: description + dtype: text + doc: Description of the imaging plane. + quantity: '?' + - name: excitation_lambda + dtype: float32 + doc: Excitation wavelength, in nm. 
+ - name: imaging_rate + dtype: float32 + doc: Rate that images are acquired, in Hz. If the corresponding TimeSeries is present, the rate should be stored + there instead. + quantity: '?' + - name: indicator + dtype: text + doc: Calcium indicator. + - name: location + dtype: text + doc: Location of the imaging plane. Specify the area, layer, comments on estimation + of area/layer, stereotaxic coordinates if in vivo, etc. Use standard atlas + names for anatomical regions when possible. + - name: manifold + dtype: float32 + dims: + - - height + - width + - x, y, z + - - height + - width + - depth + - x, y, z + shape: + - - null + - null + - 3 + - - null + - null + - null + - 3 + doc: "DEPRECATED Physical position of each pixel. 'xyz' represents the position\ + \ of the pixel relative to the defined coordinate space. Deprecated in favor of origin_coords and grid_spacing." + quantity: '?' + attributes: + - name: conversion + dtype: float32 + default_value: 1.0 + doc: Scalar to multiply each element in data to convert it to the specified 'unit'. + If the data are stored in acquisition system units or other units + that require a conversion to be interpretable, multiply the data by 'conversion' + to convert the data to the specified 'unit'. e.g. if the data acquisition system + stores values in this object as pixels from x = -500 to 499, y = -500 to 499 + that correspond to a 2 m x 2 m range, then the 'conversion' multiplier to get + from raw data acquisition pixel units to meters is 2/1000. + required: false + - name: unit + dtype: text + default_value: meters + doc: Base unit of measurement for working with the data. The default value is 'meters'. + required: false + - name: origin_coords + dtype: float32 + dims: + - - x, y + - - x, y, z + shape: + - - 2 + - - 3 + doc: Physical location of the first element of the imaging plane (0, 0) for 2-D data or (0, 0, 0) for 3-D data. + See also reference_frame for what the physical location is relative to (e.g., bregma). 
+ quantity: '?' + attributes: + - name: unit + dtype: text + default_value: meters + doc: Measurement units for origin_coords. The default value is 'meters'. + - name: grid_spacing + dtype: float32 + dims: + - - x, y + - - x, y, z + shape: + - - 2 + - - 3 + doc: Space between pixels in (x, y) or voxels in (x, y, z) directions, in the specified unit. + Assumes imaging plane is a regular grid. See also reference_frame to interpret the grid. + quantity: '?' + attributes: + - name: unit + dtype: text + default_value: meters + doc: Measurement units for grid_spacing. The default value is 'meters'. + - name: reference_frame + dtype: text + doc: Describes reference frame of origin_coords and grid_spacing. + For example, this can be a text description of the anatomical location and orientation of the grid + defined by origin_coords and grid_spacing or the vectors needed to transform or rotate the grid to + a common anatomical axis (e.g., AP/DV/ML). This field is necessary to interpret origin_coords and grid_spacing. + If origin_coords and grid_spacing are not present, then this field is not required. + For example, if the microscope takes 10 x 10 x 2 images, where the first value of the data matrix + (index (0, 0, 0)) corresponds to (-1.2, -0.6, -2) mm relative to bregma, the spacing between pixels is 0.2 mm in + x, 0.2 mm in y and 0.5 mm in z, and larger numbers in x means more anterior, larger numbers in y means more + rightward, and larger numbers in z means more ventral, then enter the following -- + origin_coords = (-1.2, -0.6, -2) + grid_spacing = (0.2, 0.2, 0.5) + reference_frame = "Origin coordinates are relative to bregma. First dimension corresponds to anterior-posterior + axis (larger index = more anterior). Second dimension corresponds to medial-lateral axis (larger index = more + rightward). Third dimension corresponds to dorsal-ventral axis (larger index = more ventral)." + quantity: '?' 
+ groups: + - neurodata_type_inc: OpticalChannel + doc: An optical channel used to record from an imaging plane. + quantity: '+' + links: + - name: device + target_type: Device + doc: Link to the Device object that was used to record from this electrode. + +- neurodata_type_def: OpticalChannel + neurodata_type_inc: NWBContainer + doc: An optical channel used to record from an imaging plane. + datasets: + - name: description + dtype: text + doc: Description or other notes about the channel. + - name: emission_lambda + dtype: float32 + doc: Emission wavelength for channel, in nm. + +- neurodata_type_def: MotionCorrection + neurodata_type_inc: NWBDataInterface + default_name: MotionCorrection + doc: 'An image stack where all frames are shifted (registered) to a common coordinate + system, to account for movement and drift between frames. Note: each frame at + each point in time is assumed to be 2-D (has only x & y dimensions).' + groups: + - neurodata_type_inc: CorrectedImageStack + doc: Results from motion correction of an image stack. + quantity: '+' + +- neurodata_type_def: CorrectedImageStack + neurodata_type_inc: NWBDataInterface + doc: Results from motion correction of an image stack. + groups: + - name: corrected + neurodata_type_inc: ImageSeries + doc: Image stack with frames shifted to the common coordinates. + - name: xy_translation + neurodata_type_inc: TimeSeries + doc: Stores the x,y delta necessary to align each frame to the common coordinates, + for example, to align each frame to a reference image. + links: + - name: original + target_type: ImageSeries + doc: Link to ImageSeries object that is being registered. 
diff --git a/nwb-schema/2.10.0/core/nwb.retinotopy.yaml b/nwb-schema/2.10.0/core/nwb.retinotopy.yaml new file mode 100644 index 000000000..1cf984578 --- /dev/null +++ b/nwb-schema/2.10.0/core/nwb.retinotopy.yaml @@ -0,0 +1,234 @@ +groups: +- neurodata_type_def: ImagingRetinotopy + neurodata_type_inc: NWBDataInterface + default_name: ImagingRetinotopy + doc: 'DEPRECATED. Intrinsic signal optical imaging or widefield imaging for measuring + retinotopy. Stores orthogonal maps (e.g., altitude/azimuth; radius/theta) of responses + to specific stimuli and a combined polarity map from which to identify visual areas. + This group does not store the raw responses imaged during retinotopic mapping or the + stimuli presented, but rather the resulting phase and power maps after applying a Fourier + transform on the averaged responses. + Note: for data consistency, all images and arrays are stored in the format [row][column] + and [row, col], which equates to [y][x]. Field of view and dimension arrays may + appear backward (i.e., y before x).' + datasets: + - name: axis_1_phase_map + dtype: float32 + dims: + - num_rows + - num_cols + shape: + - null + - null + doc: Phase response to stimulus on the first measured axis. + attributes: + - name: dimension + dtype: int32 + dims: + - num_rows, num_cols + shape: + - 2 + doc: 'Number of rows and columns in the image. NOTE: row, column representation + is equivalent to height, width.' + - name: field_of_view + dtype: float32 + dims: + - height, width + shape: + - 2 + doc: Size of viewing area, in meters. + - name: unit + dtype: text + doc: Unit that axis data is stored in (e.g., degrees). + - name: axis_1_power_map + dtype: float32 + dims: + - num_rows + - num_cols + shape: + - null + - null + doc: Power response on the first measured axis. Response is scaled so 0.0 is no + power in the response and 1.0 is maximum relative power. + quantity: '?' 
+ attributes: + - name: dimension + dtype: int32 + dims: + - num_rows, num_cols + shape: + - 2 + doc: 'Number of rows and columns in the image. NOTE: row, column representation + is equivalent to height, width.' + - name: field_of_view + dtype: float32 + dims: + - height, width + shape: + - 2 + doc: Size of viewing area, in meters. + - name: unit + dtype: text + doc: Unit that axis data is stored in (e.g., degrees). + - name: axis_2_phase_map + dtype: float32 + dims: + - num_rows + - num_cols + shape: + - null + - null + doc: Phase response to stimulus on the second measured axis. + attributes: + - name: dimension + dtype: int32 + dims: + - num_rows, num_cols + shape: + - 2 + doc: 'Number of rows and columns in the image. NOTE: row, column representation + is equivalent to height, width.' + - name: field_of_view + dtype: float32 + dims: + - height, width + shape: + - 2 + doc: Size of viewing area, in meters. + - name: unit + dtype: text + doc: Unit that axis data is stored in (e.g., degrees). + - name: axis_2_power_map + dtype: float32 + dims: + - num_rows + - num_cols + shape: + - null + - null + doc: Power response on the second measured axis. Response is scaled so 0.0 is + no power in the response and 1.0 is maximum relative power. + quantity: '?' + attributes: + - name: dimension + dtype: int32 + dims: + - num_rows, num_cols + shape: + - 2 + doc: 'Number of rows and columns in the image. NOTE: row, column representation + is equivalent to height, width.' + - name: field_of_view + dtype: float32 + dims: + - height, width + shape: + - 2 + doc: Size of viewing area, in meters. + - name: unit + dtype: text + doc: Unit that axis data is stored in (e.g., degrees). + - name: axis_descriptions + dtype: text + dims: + - axis_1, axis_2 + shape: + - 2 + doc: Two-element array describing the contents of the two response axis fields. + Description should be something like ['altitude', 'azimuth'] or '['radius', + 'theta']. 
+ - name: focal_depth_image + dtype: uint16 + dims: + - num_rows + - num_cols + shape: + - null + - null + doc: 'Gray-scale image taken with same settings/parameters (e.g., focal depth, + wavelength) as data collection. Array format: [rows][columns].' + quantity: '?' + attributes: + - name: bits_per_pixel + dtype: int32 + doc: Number of bits used to represent each value. This is necessary to determine + maximum (white) pixel value. + - name: dimension + dtype: int32 + dims: + - num_rows, num_cols + shape: + - 2 + doc: 'Number of rows and columns in the image. NOTE: row, column representation + is equivalent to height, width.' + - name: field_of_view + dtype: float32 + dims: + - height, width + shape: + - 2 + doc: Size of viewing area, in meters. + - name: focal_depth + dtype: float32 + doc: Focal depth offset, in meters. + - name: format + dtype: text + doc: Format of image. Right now only 'raw' is supported. + - name: sign_map + dtype: float32 + dims: + - num_rows + - num_cols + shape: + - null + - null + doc: Sine of the angle between the direction of the gradient in axis_1 and axis_2. + quantity: '?' + attributes: + - name: dimension + dtype: int32 + dims: + - num_rows, num_cols + shape: + - 2 + doc: 'Number of rows and columns in the image. NOTE: row, column representation + is equivalent to height, width.' + - name: field_of_view + dtype: float32 + dims: + - height, width + shape: + - 2 + doc: Size of viewing area, in meters. + - name: vasculature_image + dtype: uint16 + dims: + - num_rows + - num_cols + shape: + - null + - null + doc: 'Gray-scale anatomical image of cortical surface. Array structure: [rows][columns]' + attributes: + - name: bits_per_pixel + dtype: int32 + doc: Number of bits used to represent each value. This is necessary to determine + maximum (white) pixel value + - name: dimension + dtype: int32 + dims: + - num_rows, num_cols + shape: + - 2 + doc: 'Number of rows and columns in the image. 
NOTE: row, column representation + is equivalent to height, width.' + - name: field_of_view + dtype: float32 + dims: + - height, width + shape: + - 2 + doc: Size of viewing area, in meters. + - name: format + dtype: text + doc: Format of image. Right now only 'raw' is supported. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.codespellrc b/nwb-schema/2.10.0/hdmf-common-schema/.codespellrc new file mode 100644 index 000000000..5aa4b5e75 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.codespellrc @@ -0,0 +1,3 @@ +[codespell] +skip = .git,*.pdf,*.svg +# ignore-words-list = diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.github/PULL_REQUEST_TEMPLATE.md b/nwb-schema/2.10.0/hdmf-common-schema/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..deb12e8c4 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,13 @@ +## Summary of changes + +- ... + +## PR checklist for schema changes + + +- [ ] Update the version string in `docs/source/conf.py` and `common/namespace.yaml` to the next version with the suffix "-alpha" +- [ ] Add a new section in the release notes for the new version with the date "Upcoming" +- [ ] Add release notes for the PR to `docs/source/hdmf_common_release_notes.rst` and/or + `docs/source/hdmf_experimental_release_notes.rst` + + diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.github/PULL_REQUEST_TEMPLATE/release.md b/nwb-schema/2.10.0/hdmf-common-schema/.github/PULL_REQUEST_TEMPLATE/release.md new file mode 100644 index 000000000..ddb1debc8 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.github/PULL_REQUEST_TEMPLATE/release.md @@ -0,0 +1,29 @@ +Prepare for release of hdmf-common-schema [version] + +### Before merging: +- [ ] Update requirements versions as needed +- [ ] Update legal file dates and information in `Legal.txt`, `license.txt`, `README.md`, `docs/source/conf.py`, + and any other locations as needed +- [ ] Update `README.md` as needed +- [ ] Update 
the version string in `docs/source/conf.py` and `common/namespace.yaml` (remove "-alpha" suffix) +- [ ] Update `docs/source/conf.py` as needed +- [ ] Update release notes (set release date) in `docs/source/hdmf_common_release_notes.rst`, + `docs/source/hdmf_experimental_release_notes.rst`, and any other docs as needed +- [ ] Test docs locally (`cd docs; make fulldoc`) where the hdmf-common-schema submodule in the local version of HDMF + is fully up-to-date with the head of the main branch. +- [ ] Push changes to this PR and make sure all PRs to be included in this release have been merged +- [ ] Check that the readthedocs build for this PR succeeds (build latest to pull the new branch, then activate and + build docs for new branch): https://readthedocs.org/projects/hdmf-common-schema/builds/ + +### After merging: +1. Create a new git tag. Pull the latest master branch locally, run `git tag [version] --sign`, copy and paste the + release notes into the tag message, and run `git push --tags`. +2. On the [GitHub tags page](https://github.com/hdmf-dev/hdmf-common-schema/tags) page, + click "..." -> "Create release" for the new tag on the right side of the page. + Copy and paste the release notes into the release message, update the formatting if needed (reST to Markdown), + and set the title to the version string. +3. Check that the readthedocs "latest" and "stable" builds run and succeed. Delete the readthedocs build for the + merged PR. https://readthedocs.org/projects/hdmf-common-schema/builds/ +4. Update the HDMF submodule in the HDMF branch corresponding to this schema version to point to the tagged commit. + +See https://hdmf-common-schema.readthedocs.io/en/latest/software_process.html for more details. 
diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/codespell.yml b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/codespell.yml new file mode 100644 index 000000000..243ba8ce5 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/codespell.yml @@ -0,0 +1,19 @@ +--- +name: Codespell + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Codespell + uses: codespell-project/actions-codespell@v1 diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/hdmf_compatibility_schema.yml b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/hdmf_compatibility_schema.yml new file mode 100644 index 000000000..212a82db6 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/hdmf_compatibility_schema.yml @@ -0,0 +1,28 @@ +name: Check HDMF Dev Compatibility + +on: [pull_request, workflow_dispatch] + +env: + HEAD_REF: ${{ github.head_ref }} + +jobs: + check_compatibility: + runs-on: ubuntu-latest + concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + steps: + - uses: actions/checkout@v6 + - name: Set up Python 3.14 + uses: actions/setup-python@v6 + with: + python-version: "3.14" + - name: Clone HDMF and Run HDMF tests + run: | + git clone https://github.com/hdmf-dev/hdmf.git --recurse-submodules + cd hdmf + python -m pip install -e ".[all]" + cd src/hdmf/common/hdmf-common-schema + git checkout $HEAD_REF # checkout branch + cd ../../../.. 
+ pytest tests/unit/common/ diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/project_action.yml b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/project_action.yml new file mode 100644 index 000000000..ad2a9c73a --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/project_action.yml @@ -0,0 +1,34 @@ +name: Add issues to Development Project Board + +on: + issues: + types: + - opened + +jobs: + add-to-project: + name: Add issue to project + runs-on: ubuntu-latest + steps: + - name: GitHub App token + id: generate_token + uses: tibdex/github-app-token@v1.7.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PEM }} + + - name: Add to Developer Board + env: + TOKEN: ${{ steps.generate_token.outputs.token }} + uses: actions/add-to-project@v0.4.0 + with: + project-url: https://github.com/orgs/hdmf-dev/projects/7 + github-token: ${{ env.TOKEN }} + + - name: Add to Community Board + env: + TOKEN: ${{ steps.generate_token.outputs.token }} + uses: actions/add-to-project@v0.4.0 + with: + project-url: https://github.com/orgs/hdmf-dev/projects/8 + github-token: ${{ env.TOKEN }} diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/validate_schema.yaml b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/validate_schema.yaml new file mode 100644 index 000000000..f9ae464e1 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.github/workflows/validate_schema.yaml @@ -0,0 +1,21 @@ +name: Validate schema + +on: [push, pull_request, workflow_dispatch] + +jobs: + validate: + # run pipeline on either a push event or a PR event on a fork + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 + with: + python-version: "3.11" + - name: Install dev branch of HDMF + run: | + pip install 
git+https://github.com/hdmf-dev/hdmf.git + - name: Validate schema specification + run: | + validate_hdmf_spec common -m hdmf-common.schema.json diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.gitignore b/nwb-schema/2.10.0/hdmf-common-schema/.gitignore new file mode 100644 index 000000000..14a93358e --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.gitignore @@ -0,0 +1,16 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Sphinx documentation +docs/_build/ + +# Autogenerated Sphinx sources +docs/source/_format_auto_docs/ + +# Jupyter Notebook +.ipynb_checkpoints + +#PyCharm +.idea/ diff --git a/nwb-schema/2.10.0/hdmf-common-schema/.readthedocs.yaml b/nwb-schema/2.10.0/hdmf-common-schema/.readthedocs.yaml new file mode 100644 index 000000000..ddb37307a --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/.readthedocs.yaml @@ -0,0 +1,23 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +build: + os: ubuntu-24.04 + tools: + python: '3.14' + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +formats: all + +# Optionally set the version of Python and requirements required to build your docs +python: + install: + - requirements: requirements-doc.txt diff --git a/nwb-schema/2.10.0/hdmf-common-schema/Legal.txt b/nwb-schema/2.10.0/hdmf-common-schema/Legal.txt new file mode 100644 index 000000000..09aba929c --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/Legal.txt @@ -0,0 +1,5 @@ +“hdmf-common-schema” Copyright (c) 2019-2026, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. 
+ +If you have questions about your rights to use or distribute this software, please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + +NOTICE. This Software was developed under funding from the U.S. Department of Energy and the U.S. Government consequently retains certain rights. As such, the U.S. Government has been granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software to reproduce, distribute copies to the public, prepare derivative works, and perform publicly and display publicly, and to permit other to do so. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/README.md b/nwb-schema/2.10.0/hdmf-common-schema/README.md new file mode 100644 index 000000000..b25332a69 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/README.md @@ -0,0 +1,50 @@ +# hdmf-common + +Documentation of the HDMF Common data format specification is available at +[https://hdmf-common-schema.readthedocs.io](https://hdmf-common-schema.readthedocs.io/en/stable/) + +## Citing HDMF Common Schema + +* **RRID:** HDMF Common Schema, RRID:SCR_021342 + +## Description + +The HDMF Common specification defines a collection of common, reusable data structures +that build the foundation for the modeling of advanced data formats, e.g., the +[Neurodata Without Borders (NWB)](https://www.nwb.org/) +neurophysiology data standard. The HDMF Common schema is integrated with [HDMF](https://github.com/hdmf-dev/hdmf), +which provides advanced APIs for reading, writing, and using HDMF-common data types. + +The HDMF-common schema provides the following data structures: + +- **DynamicTable**: A column-based table data structure that supports ragged columns and one-to-one and one-to-many relationships. +- **VectorData**: A data structure for representing a column of a **DynamicTable**. +- **VectorIndex**: A data structure for indexing a **VectorData**. This is used to store one-to-many relationships. 
+- **ElementIdentifiers**: A 1D array for storing primary identifiers for elements of a table. +- **DynamicTableRegion**: A data structure for linking to a row or set of rows of a **DynamicTable**. +- **AlignedDynamicTable**: A **DynamicTable** that supports storing a collection of sub-tables. +- **CSRMatrix**: A compressed sparse row matrix. +- **HERD**: A set of tables that track external resource references in a file or across multiple files + +The schema also provides the following base data structures: + +- **Data**: An abstract data type for a dataset. +- **Container**: An abstract data type for a generic container storing collections of data and metadata, i.e., a group. +- **SimpleMultiContainer**: A simple container that holds multiple ``Data`` and ``Container`` types. + +Finally, HDMF-common contains experimental data structures. Prior to adding a new data type to the HDMF-common +specification, new data structures are added to the HDMF-experimental to enable users to experiment with these data +structures. Because these data structures are experimental, they are not guaranteed to maintain backward compatibility, +and may never make it into HDMF-common. + +Current experimental data types are: + +- **EnumData**: A data structure for representing a column where the data come from a fixed set of elements. + +## Generate documentation + +```bash +pip install -r requirements-doc.txt +cd docs +make fulldoc +``` diff --git a/nwb-schema/2.10.0/hdmf-common-schema/common/base.yaml b/nwb-schema/2.10.0/hdmf-common-schema/common/base.yaml new file mode 100644 index 000000000..1e8cbc003 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/common/base.yaml @@ -0,0 +1,21 @@ +# hdmf-schema-language=2.0.2 +datasets: +- data_type_def: Data + doc: An abstract data type for a dataset. + +groups: +- data_type_def: Container + doc: An abstract data type for a group storing collections of data and + metadata. Base type for all data and metadata containers. 
+ +- data_type_def: SimpleMultiContainer + data_type_inc: Container + doc: A simple Container for holding onto multiple containers. + datasets: + - data_type_inc: Data + quantity: '*' + doc: Data objects held within this SimpleMultiContainer. + groups: + - data_type_inc: Container + quantity: '*' + doc: Container objects held within this SimpleMultiContainer. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/common/experimental.yaml b/nwb-schema/2.10.0/hdmf-common-schema/common/experimental.yaml new file mode 100644 index 000000000..69dc4ad6e --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/common/experimental.yaml @@ -0,0 +1,14 @@ +groups: [] +datasets: +- data_type_def: EnumData + data_type_inc: VectorData + dtype: uint8 + doc: Data that come from a fixed set of values. A data value of i corresponds + to the i-th value in the VectorData referenced by the 'elements' attribute. + attributes: + - name: elements + dtype: + target_type: VectorData + reftype: object + doc: Reference to the VectorData object that contains the enumerable elements. 
+ diff --git a/nwb-schema/2.10.0/hdmf-common-schema/common/namespace.yaml b/nwb-schema/2.10.0/hdmf-common-schema/common/namespace.yaml new file mode 100644 index 000000000..7e31dba53 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/common/namespace.yaml @@ -0,0 +1,51 @@ +# hdmf-schema-language=2.0.2 +namespaces: +- name: hdmf-common + doc: Common data structures provided by HDMF + author: + - Andrew Tritt + - Oliver Ruebel + - Ryan Ly + - Ben Dichter + contact: + - ajtritt@lbl.gov + - oruebel@lbl.gov + - rly@lbl.gov + - bdichter@lbl.gov + full_name: HDMF Common + schema: + - doc: base data types + source: base.yaml + title: Base data types + - doc: data types for a column-based table + source: table.yaml + title: Table data types + - doc: data types for different types of sparse matrices + source: sparse.yaml + title: Sparse data types + - doc: data types for storing references to web accessible resources + source: resources.yaml + title: Resource reference data types + version: 1.9.0 + +- name: hdmf-experimental + doc: Experimental data structures provided by HDMF. These are not guaranteed to be available in the future. + author: + - Andrew Tritt + - Oliver Ruebel + - Ryan Ly + - Ben Dichter + - Matthew Avaylon + contact: + - ajtritt@lbl.gov + - oruebel@lbl.gov + - rly@lbl.gov + - bdichter@lbl.gov + - mavaylon@lbl.gov + full_name: HDMF Experimental + schema: + - namespace: hdmf-common + - doc: Experimental data types + source: experimental.yaml + title: Experimental data types + version: 0.6.0 diff --git a/nwb-schema/2.10.0/hdmf-common-schema/common/resources.yaml b/nwb-schema/2.10.0/hdmf-common-schema/common/resources.yaml new file mode 100644 index 000000000..5e091c772 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/common/resources.yaml @@ -0,0 +1,105 @@ +# hdmf-schema-language=2.0.2 +groups: +- data_type_def: HERD + data_type_inc: Container + doc: "HDMF External Resources Data Structure. 
A set of six tables for tracking external resource references in a file or across multiple files." + datasets: + - data_type_inc: Data + name: keys + doc: A table for storing user terms that are used to refer to external resources. + dtype: + - name: key + dtype: text + doc: The user term that maps to one or more resources in the `resources` table, e.g., "human". + dims: + - num_rows + shape: + - null + + - data_type_inc: Data + name: files + doc: A table for storing object ids of files used in external resources. + dtype: + - name: file_object_id + dtype: text + doc: The object id (UUID) of a file that contains objects that refers to external resources. + dims: + - num_rows + shape: + - null + + - data_type_inc: Data + name: entities + doc: A table for mapping user terms (i.e., keys) to resource entities. + dtype: + - name: entity_id + dtype: text + doc: "The compact uniform resource identifier (CURIE) of the entity, in the form + [prefix]:[unique local identifier], e.g., 'NCBI_TAXON:9606'." + - name: entity_uri + dtype: text + doc: "The URI for the entity this reference applies to. This can be an empty string. + e.g., https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606" + dims: + - num_rows + shape: + - null + + - data_type_inc: Data + name: objects + doc: A table for identifying which objects in a file contain references to external resources. + dtype: + - name: files_idx + dtype: uint + doc: The row index to the file in the `files` table containing the object. + - name: object_id + dtype: text + doc: The object id (UUID) of the object. + - name: object_type + dtype: text + doc: The data type of the object. + - name: relative_path + dtype: text + doc: The relative path from the data object with the `object_id` to the dataset or attribute + with the value(s) that is associated with an external resource. This can be an empty + string if the object is a dataset that contains the value(s) that is associated + with an external resource. 
+ - name: field + dtype: text + doc: The field within the compound data type using an external resource. This is used only if + the dataset or attribute is a compound data type; otherwise this should be an empty + string. + dims: + - num_rows + shape: + - null + + - data_type_inc: Data + name: object_keys + doc: A table for identifying which objects use which keys. + dtype: + - name: objects_idx + dtype: uint + doc: The row index to the object in the `objects` table that holds the key + - name: keys_idx + dtype: uint + doc: The row index to the key in the `keys` table. + dims: + - num_rows + shape: + - null + + - data_type_inc: Data + name: entity_keys + doc: A table for identifying which keys use which entity. + dtype: + - name: entities_idx + dtype: uint + doc: The row index to the entity in the `entities` table. + - name: keys_idx + dtype: uint + doc: The row index to the key in the `keys` table. + dims: + - num_rows + shape: + - null diff --git a/nwb-schema/2.10.0/hdmf-common-schema/common/sparse.yaml b/nwb-schema/2.10.0/hdmf-common-schema/common/sparse.yaml new file mode 100644 index 000000000..09de81772 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/common/sparse.yaml @@ -0,0 +1,35 @@ +# hdmf-schema-language=2.0.2 +groups: +- data_type_def: CSRMatrix + data_type_inc: Container + doc: 'A compressed sparse row matrix. Data are stored in the standard CSR format, where column indices for row i are + stored in indices[indptr[i]:indptr[i+1]] and their corresponding values are stored in data[indptr[i]:indptr[i+1]].' + attributes: + - name: shape + dtype: uint + dims: + - number of rows, number of columns + shape: + - 2 + doc: The shape (number of rows, number of columns) of this sparse matrix. + datasets: + - name: indices + dtype: uint + dims: + - number of non-zero values + shape: + - null + doc: The column indices. + - name: indptr + dtype: uint + dims: + - number of rows in the matrix + 1 + shape: + - null + doc: The row index pointer. 
+ - name: data + dims: + - number of non-zero values + shape: + - null + doc: The non-zero values in the matrix. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/common/table.yaml b/nwb-schema/2.10.0/hdmf-common-schema/common/table.yaml new file mode 100644 index 000000000..87b383372 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/common/table.yaml @@ -0,0 +1,202 @@ +# hdmf-schema-language=2.0.2 +datasets: + +- data_type_def: VectorData + data_type_inc: Data + doc: An n-dimensional dataset representing a column of a DynamicTable. + If used without an accompanying VectorIndex, first dimension is + along the rows of the DynamicTable and each step along the first + dimension is a cell of the larger table. VectorData can also be + used to represent a ragged array if paired with a VectorIndex. + This allows for storing arrays of varying length in a single cell + of the DynamicTable by indexing into this VectorData. The first + vector is at VectorData[0:VectorIndex[0]]. The second vector is at + VectorData[VectorIndex[0]:VectorIndex[1]], and so on. + dims: + - - dim0 + - - dim0 + - dim1 + - - dim0 + - dim1 + - dim2 + - - dim0 + - dim1 + - dim2 + - dim3 + shape: + - - null + - - null + - null + - - null + - null + - null + - - null + - null + - null + - null + attributes: + - name: description + dtype: text + doc: Description of what these vectors represent. + +- data_type_def: VectorIndex + data_type_inc: VectorData + dtype: uint8 + doc: Used with VectorData to encode a ragged array. An array of indices + into the first dimension of the target VectorData, forming a map + between the rows of a DynamicTable and the indices of the VectorData. + The name of the VectorIndex is expected to be the name of the target + VectorData object followed by "_index". + dims: + - num_rows + shape: + - null + attributes: + - name: target + dtype: + target_type: VectorData + reftype: object + doc: Reference to the target dataset that this index applies to. 
+ +- data_type_def: ElementIdentifiers + data_type_inc: Data + default_name: element_id + dtype: int32 + dims: + - num_elements + shape: + - null + doc: A list of unique identifiers for values within a dataset, e.g. rows of a DynamicTable. + +- data_type_def: DynamicTableRegion + data_type_inc: VectorData + dtype: int32 + doc: DynamicTableRegion provides a link from one table to an index or region of another. The `table` attribute is a + link to another `DynamicTable`, indicating which table is referenced, and the data is int(s) indicating the row(s) + (0-indexed) of the target array. `DynamicTableRegion`s can be used to associate rows with repeated meta-data without + data duplication. They can also be used to create hierarchical relationships between multiple `DynamicTable`s. + `DynamicTableRegion` objects may be paired with a `VectorIndex` object to create ragged references, so a single cell + of a `DynamicTable` can reference many rows of another `DynamicTable`. + dims: + - num_rows + shape: + - null + attributes: + - name: table + dtype: + target_type: DynamicTable + reftype: object + doc: Reference to the DynamicTable object that this region applies to. + - name: description + dtype: text + doc: Description of what this table region points to. + +groups: + +- data_type_def: DynamicTable + data_type_inc: Container + doc: A group containing multiple datasets that are aligned on the first dimension + (Currently, this requirement is left up to APIs to check and enforce). These datasets + represent different columns in the table. Apart from a column that contains unique + identifiers for each row, there are no other required datasets. Users are free to add + any number of custom VectorData objects (columns) here. DynamicTable also supports + ragged array columns, where each element can be of a different size. To add a ragged + array column, use a VectorIndex type to index the corresponding VectorData type. 
+ See documentation for VectorData and VectorIndex for more details. + Unlike a compound data type, which is analogous to storing an + array-of-structs, a DynamicTable can be thought of as a struct-of-arrays. This provides + an alternative structure to choose from when optimizing storage for anticipated access + patterns. Additionally, this type provides a way of creating a table without having to + define a compound type up front. Although this convenience may be attractive, users + should think carefully about how data will be accessed. DynamicTable is more appropriate + for column-centric access, whereas a dataset with a compound type would be more + appropriate for row-centric access. Finally, data size should also be taken into account. + For small tables, performance loss may be an acceptable trade-off for the flexibility of + a DynamicTable. + attributes: + - name: colnames + dtype: text + dims: + - num_columns + shape: + - null + doc: The names of the columns in this table. This should be used to specify + an order to the columns. + - name: description + dtype: text + doc: Description of what is in this dynamic table. + datasets: + - name: id + data_type_inc: ElementIdentifiers + dims: + - num_rows + shape: + - null + doc: Array of unique identifiers for the rows of this dynamic table. + - data_type_inc: VectorData + doc: Vector columns, including index columns, of this dynamic table. + quantity: '*' + groups: + - name: meanings_tables + doc: Group containing MeaningsTable objects that provide meanings for + values in VectorData columns within this DynamicTable. + quantity: '?' + groups: + - data_type_inc: MeaningsTable + doc: MeaningsTable objects that provide meanings for values + in VectorData columns within this DynamicTable. Tables should be named + according to the column they provide meanings for with a "_meanings" suffix. 
+ e.g., if a VectorData column is named "stimulus_type", the corresponding + MeaningsTable should be named "stimulus_type_meanings". + quantity: '*' + +- data_type_def: AlignedDynamicTable + data_type_inc: DynamicTable + doc: DynamicTable container that supports storing a collection of sub-tables. Each + sub-table is a DynamicTable itself that is aligned with the main table by row + index. I.e., all DynamicTables stored in this group MUST have the same number + of rows. This type effectively defines a 2-level table in which the main data + is stored in the main table implemented by this type and additional columns of + the table are grouped into categories, with each category being represented by + a separate DynamicTable stored within the group. + attributes: + - name: categories + dtype: text + dims: + - num_categories + shape: + - null + doc: The names of the categories in this AlignedDynamicTable. Each category is + represented by one DynamicTable stored in the parent group. This attribute should + be used to specify an order of categories and the category names must match + the names of the corresponding DynamicTable in the group. + groups: + - data_type_inc: DynamicTable + doc: A DynamicTable representing a particular category for columns in the AlignedDynamicTable + parent container. The table MUST be aligned with (i.e., have the same number + of rows) as all other DynamicTables stored in the AlignedDynamicTable parent + container. The name of the category is given by the name of the DynamicTable + and its description by the description attribute of the DynamicTable. + quantity: '*' + +- data_type_def: MeaningsTable + data_type_inc: DynamicTable + doc: A table to store information about the meanings of values in a linked VectorData object. + All possible values of the linked VectorData object should be present in the 'value' column + of this table, even if the value is not observed in the data. 
Additional columns may be + added to store additional metadata about each value. The name of the MeaningsTable + should correspond to the name of the linked VectorData object with a "_meanings" suffix. + e.g., if the linked VectorData object is named "stimulus_type", the corresponding + MeaningsTable should be named "stimulus_type_meanings". + datasets: + - name: value + data_type_inc: VectorData + doc: The value of a row in the linked VectorData object. + - name: meaning + data_type_inc: VectorData + dtype: text + doc: The meaning of the value in the linked VectorData object. + links: + - name: target + target_type: VectorData + doc: Link to the VectorData object for which this table provides meanings. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/Makefile b/nwb-schema/2.10.0/hdmf-common-schema/docs/Makefile new file mode 100644 index 000000000..421ccef4d --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/Makefile @@ -0,0 +1,183 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXAPIDOC = sphinx-apidoc +PAPER = +BUILDDIR = _build +RSTDIR = source +CONFDIR = $(PWD)/$(RSTDIR) + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(RSTDIR) +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext fulldoc allclean + +help: + @echo "To update documentation sources from the format specification please use \`make apidoc'" + @echo "" + @echo "To build the documentation please use \`make ' where is one of" + @echo " fulldoc to rebuild the apidoc, html, and latexpdf documents all at once" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " apidoc to build RST from source code" + @echo " clean to clean all documents built by Sphinx in _build" + @echo " allclean to clean all autogenerated documents both from Sphinx and apidoc" + +allclean: + $(MAKE) clean + -rm $(RSTDIR)/_format_auto_docs/*.png + -rm $(RSTDIR)/_format_auto_docs/*.pdf + -rm $(RSTDIR)/_format_auto_docs/*.rst + -rm $(RSTDIR)/_format_auto_docs/*.inc + -rm $(RSTDIR)/_format_auto_docs/git_hash.txt + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html 
$(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sample.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sample.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/sample" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sample" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
+ +apidoc: + PYTHONPATH=$(CONFDIR):$(PYTHONPATH) hdmf_generate_format_docs + @echo + @echo "Generate rst source files from HDMF-common spec." + +fulldoc: + $(MAKE) allclean + @echo + @echo "Rebuilding apidoc, html, latexpdf" + $(MAKE) apidoc + $(MAKE) html + $(MAKE) latexpdf diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/Readme.md b/nwb-schema/2.10.0/hdmf-common-schema/docs/Readme.md new file mode 100644 index 000000000..9cf003c28 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/Readme.md @@ -0,0 +1,96 @@ +**Overview** + +The HDMF-common specification documentation uses Sphinx [http://www.sphinx-doc.org/en/stable/index.html](http://www.sphinx-doc.org/en/stable/index.html) + +**Prerequisites** + +```pip install hdmf-docutils``` + +**Rebuilding All** + +To rebuild the full documentation in html, latex, and PDF simply run: + +```make fulldoc``` + +This is a convenience function that is equivalent to: + +``` +make allclean +make apidoc +make html +make latexpdf +``` + +**Generating the format documentation from the format spec** + +The format documentation is auto-generated from the format specification (YAML/JSON) sources via: + +```make apidoc``` + +This will invoke the ``hdmf_generate_format_docs`` executable provided by ``hdmf-docutils`` package to automatically generate a series of .rst, .png, and .pdf files that are stored in the folder ```source/_format_auto_docs```. The generated .rst files are included in ```source/format.rst``` and the png and pdf files are used as figures in the autogenerated docs. The folder ```source/format_auto_docs``` is reserved for autogenerated files, i.e., files in the folder should not be added or edited by hand as they will be deleted and rebuilt during the full build of the documentation. + +By default the Sphinx configuration is setup to always regenerate the sources whenever the docs are being built (see next section). 
This behavior can be customized via the ```spec_doc_rebuild_always``` parameter in ```source/conf.py``` + +**Building a specific document type** + +To build the documentation simply: + +```make <target>``` + +where ```<target>``` is, e.g., ```latexpdf```, ```html```, ```singlehtml``` or ```man```. For a complete list of supported doc-types see: + +```make help``` + +**Cleaning up** + +```make clean``` cleans up all builds of the documentation located in ```_build```. + +```make allclean``` cleans up all builds of the documentation located in ```_build``` as well as all autogenerated sources stored in ```source/format_auto_docs```. + +**Configuration** + +The build of the documentation can be customized via a broad range of Sphinx options in: + +```source/conf_doc_autogen.py``` + +In addition to standard Sphinx options, there are a number of additional options used to customize the content and structure of the autogenerated documents, e.g.: + +* ```spec_show_yaml_src``` Boolean indicating whether the YAML sources should be included for the different neurodata types +* ```spec_show_json_src``` Boolean indicating whether the JSON sources should be included for the different neurodata types +* ```spec_generate_src_file``` Boolean indicating whether the YAML/JSON sources of the neurodata_types should be rendered in a separate section (True) or in the same location as the main documentation +* ```spec_show_hierarchy_plots ``` Boolean indicating whether we should generate and show figures of the hierarchy defined by the specifications as part of the documentation +* ```spec_file_per_type``` Boolean indicating whether we should generate separate .inc reStructuredText for each neurodata_type (True) +or should all text be added to the main file (False) +* ```spec_show_subgroups_in_tables``` Should subgroups of the main groups be rendered in the table as well. 
Usually this is disabled since groups are rendered as separate sections in the text +* ```spec_appreviate_main_object_doc_in_tables``` Abbreviate the documentation of the main object for which a table is rendered in the table. This is commonly set to True as doc of the main object is already rendered as the main intro for the section describing the object +* ```spec_show_title_for_tables``` Add a title for the table showing the specifications. +* ```spec_show_subgroups_in_seperate_table``` Should top-level subgroups be listed in a separate table or as part of the main dataset and attributes table +* ```spec_table_depth_char``` Char to be used as prefix to indicate the depth of an object in the specification hierarchy. NOTE: The char used should be supported by LaTeX. +* ```spec_add_latex_clearpage_after_ndt_sections``` Add a LaTeX clearpage after each main section describing a neurodata_type. This helps in LaTeX to keep the ordering of figures, tables, and code blocks consistent in particular when the hierarchy_plots are included. 
+* ```spec_resolve_type_inc``` Resolve includes to always show the full list of objects that are part of a type (True) or to show only the parts that are actually new to a current type while only linking to base types (False) + +In addition, the location of the input format specification can be customized as follows: + + +* ```spec_input_spec_dir``` Directory where the YAML files for the namespace to be documented are located +* ```spec_input_namespace_filename``` Name of the YAML (or JSON) file with the specification of the Namespace to be documented +* ```spec_input_default_namespace``` Name of the default namespace in the file + +Finally, the name and location of output files can be customized as follows: + + +* ```spec_output_dir``` Directory where the autogenerated files should be stored +* ```spec_output_master_filename``` Name of the master rst file that includes all the autogenerated docs +* ```spec_output_doc_filename``` Name of the file where the main documentation goes +* ```spec_output_src_filename``` Name of the file where the sources of the format spec go. NOTE: This file is only generated if spec_generate_src_file is enabled +* ```spec_output_doc_type_hierarchy_filename``` Name of the file containing the type hierarchy. (Included in spec_output_doc_filename) + +To speed up the build of the format docs we can prevent the ``hdmf_generate_format_docs`` executable from regenerating the sources from YAML if the git-hash from the previous build is still current. 
This is controlled via the following options: + +* ``spec_clean_output_dir_if_old_git_hash`` Clean up the output directory before we generate the source if the git hash is out of date + +* ``spec_skip_doc_autogen_if_current_git_hash`` Do not rebuild the format sources if we have previously build the sources and the git hash matches + +In the regular Sphinx ```source/conf.py``` file we can then also set: + +* ```spec_doc_rebuild_always``` Boolean to define to always rebuild the source docs from YAML when doing a regular build of the sources (e.g., via ```make html```) even if the folder with the source files already exists diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/make.bat b/nwb-schema/2.10.0/hdmf-common-schema/docs/make.bat new file mode 100644 index 000000000..13874ecb1 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/make.bat @@ -0,0 +1,224 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set RSTDIR=source +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% %RSTDIR% +set I18NSPHINXOPTS=%SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. fulldoc to rebuild the apidoc, html, and latex documents all at once + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. 
text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + echo. apidoc to build RST from source code + echo. clean to clean all documents built by Sphinx in _build + echo. allclean to clean all autogenerated documents both from Sphinx and apidoc + goto end +) + +if "%1" == "allclean" ( + make clean + del /q %RSTDIR%\_format_auto_docs\*.png + del /q %RSTDIR%\_format_auto_docs\*.pdf + del /q %RSTDIR%\_format_auto_docs\*.rst + del /q %RSTDIR%\_format_auto_docs\*.inc + del /q %RSTDIR%\_format_auto_docs\git_hash.txt + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. 
+ goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\sample.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\sample.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. 
The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +if "%1" == "apidoc" ( + set PYTHONPATH=%cd%/source;%PYTHONPATH% + hdmf_generate_format_docs + if errorlevel 1 exit /b 1 + echo. + echo.Generate rst source files from HDMF-common spec. + goto end +) + +if "%1" == "fulldoc" ( + make allclean + echo. 
+ echo.Rebuilding apidoc, html, latexpdf + make apidoc + make html + make latex + goto end +) + +:end diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/_static/theme_overrides.css b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/_static/theme_overrides.css new file mode 100644 index 000000000..63ee6cc74 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/_static/theme_overrides.css @@ -0,0 +1,13 @@ +/* override table width restrictions */ +@media screen and (min-width: 767px) { + + .wy-table-responsive table td { + /* !important prevents the common CSS stylesheets from overriding + this as on RTD they are loaded after this stylesheet */ + white-space: normal !important; + } + + .wy-table-responsive { + overflow: visible !important; + } +} diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/conf.py b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/conf.py new file mode 100644 index 000000000..323ee794e --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/conf.py @@ -0,0 +1,288 @@ +# -*- coding: utf-8 -*- +# +# sample documentation build configuration file, created by +# sphinx-quickstart on Mon Apr 16 21:22:43 2012. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Not that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. 
+ +import sys +sys.setrecursionlimit(1500) # Attempt to fix problem with pickle on ReadTheDocs + +import sphinx_rtd_theme + +# -- Generate sources from YAML--------------------------------------------------- +spec_doc_rebuild_always = True # Always rebuild the source docs from YAML even if the folder with the source files already exists + +def run_doc_autogen(_): + """ + Execute the autogeneration of Sphinx format docs from the YAML sources + :param _: + """ + import sys + import os + conf_file_dir = os.path.dirname(os.path.abspath(__file__)) + sys.path.append(conf_file_dir) # Need so that generate format docs can find the conf_doc_autogen file + from conf_doc_autogen import spec_output_dir + + if spec_doc_rebuild_always or not os.path.exists(spec_output_dir): + from hdmf_docutils.generate_format_docs import main as generate_docs + generate_docs() + +def setup(app): + app.connect('builder-inited', run_doc_autogen) + app.add_css_file("theme_overrides.css") # overrides for wide tables in RTD theme + + +# -- ext settings ----------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +autoclass_content = 'both' +autodoc_docstring_signature = True +autodoc_member_order = 'bysource' +add_function_parentheses = False +numfig = True + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc'] + +# Add any paths that contain templates here, relative to this directory. 
+templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'source/index' +master_doc = 'index' + +# General information about the project. +project = u'HDMF-common Specification' +copyright = u'2019-2026, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = 'v1.9.0' +# The full version, including alpha/beta/rc tags. +release = 'v1.9.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build', 'test.py'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. 
+pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +#html_theme = 'default' +#html_theme = "sphinxdoc" +html_theme = "sphinx_rtd_theme" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None +#html_logo = 'neuron.png' + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = 'neuron-180x180.png' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. 
+#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'sampledoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +'preamble': +""" +\setcounter{tocdepth}{3} +\setcounter{secnumdepth}{6} +\\usepackage{enumitem} +\setlistdepth{100} +\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of contents}} +""", +} +# \\renewlist{itemize}{itemize}{100} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +#latex_documents = [ +# ('index', 'sample.tex', u'sample Documentation', +# u'Kenneth Reitz', 'manual'), +#] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. 
+#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +#man_pages = [ +# ('index', 'sample', u'sample Documentation', +# [u'Kenneth Reitz'], 1) +#] +# +## If true, show URL addresses after external links. +##man_show_urls = False +# +# +## -- Options for Texinfo output ------------------------------------------------ +# +## Grouping the document tree into Texinfo files. List of tuples +## (source start file, target name, title, author, +## dir menu entry, description, category) +#texinfo_documents = [ +# ('index', 'sample', u'sample Documentation', +# u'Kenneth Reitz', 'sample', 'One line description of project.', +# 'Miscellaneous'), +#] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. 
+#texinfo_show_urls = 'footnote' diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/conf_doc_autogen.py b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/conf_doc_autogen.py new file mode 100644 index 000000000..c703b67bc --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/conf_doc_autogen.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +""" +Configuration file for generating sources for the format documentation from the YAML specification files +""" + +import os +import hdmf +import hdmf.common + +# -- Input options for the specification files to be used ----------------------- + +# Directory where the YAML files for the namespace to be documented are located +spec_input_spec_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../../common') + +# Name of the YAML (or JSON) file with the specification of the Namespace to be documented +spec_input_namespace_filename = 'namespace.yaml' + +# Name of the default namespace in the file +spec_input_default_namespace = 'hdmf-common' + + +# -- Options for customizing the locations of output files + +# Directory where the autogenerated files should be stored +spec_output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "_format_auto_docs") + +# Clean up the output directory before we build if the git hash is out of date +spec_clean_output_dir_if_old_git_hash = True + +# Do not rebuild the format sources if we have previously build the sources and the git hash matches +spec_skip_doc_autogen_if_current_git_hash = True + +# Name of the master rst file that includes all the autogenerated docs +spec_output_master_filename = 'format_spec_main.inc' + +# Name of the file where the main documentation goes +spec_output_doc_filename = 'format_spec_doc.inc' + +# Name of the file where the sources of the format spec go. 
NOTE: This file is only generated if +# spec_generate_src_file is enabled +spec_output_src_filename = 'format_spec_sources.inc' + +# Name of the file containing the type hierarchy. (Included in spec_output_doc_filename) +spec_output_doc_type_hierarchy_filename = 'format_spec_type_hierarchy.inc' + +# -- Options for the generation of the documentation from source ---------------- + +# Should the YAML sources be included for the different modules +spec_show_yaml_src = True + +# Show figure of the hierarchy of objects defined by the spec +spec_show_hierarchy_plots = True + +# Should the sources of the neurodata_types (YAML) be rendered in a separate section (True) or +# in the same location as the base documentation +spec_generate_src_file = True + +# Should separate .inc reStructuredText files be generated for each neurodata_type (True) +# or should all text be added to the main file +spec_file_per_type = True + +# Should top-level subgroups be listed in a separate table or as part of the main dataset and attributes table +spec_show_subgroups_in_seperate_table = True + +# Appreviate the documentation of the main object for which a table is rendered in the table. +# This is commonly set to True as doc of the main object is alrready rendered as the main intro for the +# section describing the object +spec_appreviate_main_object_doc_in_tables = True + +# Show a title for the tables +spec_show_title_for_tables = True + +# Char to be used as prefix to indicate the depth of an object in the specification hierarchy +spec_table_depth_char = '.' # '→' '.' + +# Add a LaTeX clearpage after each main section describing a neurodata_type. 
This helps in LaTeX to keep the ordering +# of figures, tables, and code blocks consistent in particular when the hierarchy_plots are included +spec_add_latex_clearpage_after_ndt_sections = True + +# Resolve includes to always show the full list of objects that are part of a type (True) +# or to show only the parts that are actually new to a current type while only linking to base types +spec_resolve_type_inc = False + +# Default type map to be used. This is the type map where dependent namespaces are stored. +spec_default_type_map = hdmf.build.TypeMap(hdmf.spec.NamespaceCatalog()) + +# Default specification classes for groups datasets and namespaces. +spec_group_spec_cls = hdmf.spec.GroupSpec +spec_dataset_spec_cls = hdmf.spec.DatasetSpec +spec_namespace_spec_cls = hdmf.spec.SpecNamespace diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/credits.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/credits.rst new file mode 100644 index 000000000..341a7da3d --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/credits.rst @@ -0,0 +1,26 @@ +******* +Credits +******* + +Authors +======= + +- Andrew Tritt +- Oliver Ruebel +- Ryan Ly +- Ben Dichter +- Matthew Avaylon + +***** +Legal +***** + +Copyright +========= + +.. include:: ../../Legal.txt + +License +======= + +.. 
include:: ../../license.txt diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/figures/ragged-array-goal.png b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/figures/ragged-array-goal.png new file mode 100644 index 000000000..59aefa8bd Binary files /dev/null and b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/figures/ragged-array-goal.png differ diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/figures/ragged-array.png b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/figures/ragged-array.png new file mode 100644 index 000000000..030742add Binary files /dev/null and b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/figures/ragged-array.png differ diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format.rst new file mode 100644 index 000000000..76b869df9 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format.rst @@ -0,0 +1,7 @@ +.. _format: + +**Version** |release| |today| + +.. .. contents:: + +.. include:: _format_auto_docs/format_spec_main.inc diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format_description.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format_description.rst new file mode 100644 index 000000000..ce62072ff --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format_description.rst @@ -0,0 +1,71 @@ +Overview of hdmf-common +======================= + +hdmf-common defines common data structures to be used across applications. + + +.. sec-DynamicTable: + +``DynamicTable`` +---------------- + +The `DynamicTable `_ type is used to store tabular data. The tables are created in a columnar fashion +with each column stored in its own `VectorData `_ object. Rows of the table are assigned unique ids with +the required ``id`` column of type ``ElementIdentifier``. The `colnames` attribute indicates the order of the columns. + + +.. 
sec-VectorData: + +``VectorData`` +--------------- + +``VectorData`` is the datatype used to store a column in a `DynamicTable `_. If unpaired with a +``VectorIndex`` object the first dimension is the row dimension, which must be the same across all of the columns in +that ``DynamicTable``. + + +.. sec-ragged-arrays: + +Ragged Arrays +-------------- + +(also known as Jagged Arrays) + +Sometimes, you want to have a 2-d array where each row of the array has a different number of elements. For instance, +in neuroscience, when storing the action potential times of sorted neurons, you might want to store them as a +neuron x times matrix, but the problem is that each neuron will have a different number of spikes, so the second +dimension will be inconsistent. + +.. figure:: figures/ragged-array-goal.png + :width: 60% + :alt: ragged array goal + +There are a number of possible solutions to this problem. Some solve it by NaN-padding +the array. You might want to store the spike times of each neuron in a separate dataset, but that will not scale well if +you have many neurons. In HDMF, you would store this using a pair of objects a `VectorData `_ and a ``VectorIndex`` +object. The `VectorData `_ array holds all of the data concatenated as a 1-d array, and it is paired with a link to a +``VectorIndex`` object that indexes the data, forming a map between the rows of the ragged array and the indices of +`VectorData `_. + +.. figure:: figures/ragged-array.png + :width: 100% + :alt: ragged arrays in HDMF + +These objects are generally stored inside a `DynamicTable `_, and the elements of ``VectorIndex`` map +onto the rows of the table. The `VectorData `_ object may be n-dimensional, but only the first dimension is ragged. + + +Experimental data structures +============================ + +The following data structures are currently available under the HDMF-experimental schema. These are subject to change! 
They are +not guaranteed to exist in the future nor maintain backward compatibility. + + +.. sec-ExternalResources + +``ExternalResources`` +--------------------- + +The `ExternalResources `_ type is used to store references to data stored in external, web-accessible databases. +This information is maintained using four row-based tables. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format_release_notes.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format_release_notes.rst new file mode 100644 index 000000000..0f6e7c286 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/format_release_notes.rst @@ -0,0 +1,8 @@ +:orphan: + +hdmf-common Release Notes +========================= + +The release notes for the "hdmf-common" namespace has moved :ref:`here `. + +The release notes for the "hdmf-experimental" namespace has moved :ref:`here `. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/hdmf_common_release_notes.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/hdmf_common_release_notes.rst new file mode 100644 index 000000000..e763dd21f --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/hdmf_common_release_notes.rst @@ -0,0 +1,122 @@ +.. _hdmf_common_release_notes: + +hdmf-common Release Notes +========================= + +1.9.0 (January 29, 2026) +------------------------ +- Changed the dtype of ``ElementIdentifiers`` and ``DynamicTableRegion`` from "int" to "int32". Under HDMF + schema language 2.x, "int" means "int32" so this change has no functional impact, except when displaying the + spec, such as in error messages. +- Added ``MeaningsTable``, a table for storing meanings for values in linked ``VectorData`` objects. + This is useful for annotating categorical data stored in a ``DynamicTable``. Added a group named "meanings_tables" + to ``DynamicTable`` to hold ``MeaningsTable`` objects that provide meanings for ``VectorData`` columns in the + ``DynamicTable``. 
+- Minor changes to data type docstrings to improve clarity. +- Promoted ``HERD`` from the HDMF-experimental namespace to a stable data type in the HDMF-common + namespace. + +1.8.0 (August 4, 2023) +---------------------- +- No change in the hdmf-common namespace. See :ref:`here ` for changes to the + hdmf-experimental namespace. + +1.7.0 (June 22, 2023) +--------------------- +- No change in the hdmf-common namespace. See :ref:`here ` for changes to the + hdmf-experimental namespace. + +1.6.0 (May 3, 2023) +------------------- +- No change in the hdmf-common namespace. See :ref:`here ` for changes to the + hdmf-experimental namespace. + +1.5.1 (January 10, 2022) +------------------------ +- No change in the hdmf-common namespace. See :ref:`here ` for changes to the + hdmf-experimental namespace. + +1.5.0 (April 19, 2021) +---------------------- +- Added ``AlignedDynamicTable``, which defines a ``DynamicTable`` that supports storing a collection of sub-tables. + Each sub-table is itself a ``DynamicTable`` that is aligned with the main table by row index. Each sub-table + defines a sub-category in the main table effectively creating a table with sub-headings to organize columns. + +1.4.0 (March 29, 2021) +------------------------- + +Summary: In 1.4.0, the HDMF-experimental namespace was added, which includes the ``ExternalResources`` and ``EnumData`` +data types. Schema in the HDMF-experimental namespace are experimental and subject to breaking changes at any time. +``ExternalResources`` was changed to support storing both names and URIs for resources. The ``VocabData`` data type was +replaced by ``EnumData`` to provide more flexible support for data from a set of fixed values. + +- Added ``EnumData`` for storing data that comes from a set of fixed values. This replaces ``VocabData`` which could + hold only string values. Also, ``VocabData`` could hold only a limited number of elements (~64k) when used with the + HDF5 storage backend. 
``EnumData`` gets around these restrictions by using an untyped dataset (VectorData) instead of + a string attribute to hold the enumerated values. +- Removed ``VocabData``. +- Renamed the "resources" table in ``ExternalResources`` to "entities". +- Created a new "resources" table to store the name and URI of the ontology / external resource used by the "entities" + table in ``ExternalResources``. +- Renamed fields in ``ExternalResources``. +- Added "entities" dataset to ``ExternalResources``. This is a row-based table dataset to replace the functionality of + the "resources" dataset in ``ExternalResources``. +- Changed the "resources" dataset in ``ExternalResources`` to store the name and URI of the ontology / external + resource used by the "entities" dataset in ``ExternalResources``. +- Added HDMF-experimental namespace. +- Moved ``ExternalResources`` and ``EnumData`` to HDMF-experimental. + +1.3.0 (December 2, 2020) +------------------------- + +- Add data type ``ExternalResources`` for storing ontology information / external resource references. NOTE: this + data type is in beta testing and is subject to change in a later version. +- Changed dtype for datasets within ``CSRMatrix`` from 'int' to 'uint'. Negative values do not make sense for these + datasets. + +1.2.1 (November 4, 2020) +------------------------ + +- Update software process documentation for maintainers. +- Fix missing data_type_inc for ``CSRMatrix``. It now has ``data_type_inc: Container``. +- Add ``hdmf-schema-language`` comment at the top of each yaml file. +- Add ``SimpleMultiContainer``, a Container for storing other Container and Data objects together + +1.2.0 (July 10, 2020) +------------------------ + +- Add software process documentation. +- Fix missing dtype for ``VectorIndex``. +- Add new ``VocabData`` data type. +- Move ``Data``, ``Index``, and ``Container`` to base.yaml. This change does not functionally change the schema. 
+- ``VectorIndex`` now extends ``VectorData`` instead of ``Index``. This change allows ``VectorIndex`` to index other + ``VectorIndex`` types. +- The ``Index`` data type is now unused and has been removed. +- Fix documentation for ragged arrays. + +1.1.3 (January 21, 2020) +------------------------ + +- Fix missing 'shape' and 'dims' key for types ``VectorData``, ``VectorIndex``, and ``DynamicTableRegion``. + +1.1.2 (January 9, 2020) +----------------------- + +- Fix version number in namespace.yaml and docs + +1.1.1 (January 9, 2020) +----------------------- + +- Support for ReadTheDocs continuous documentation was added, and legal/license documents were also added. The schema is + unchanged. + +1.1.0 (January 3, 2020) +----------------------- + +- The 'colnames' attribute of ``DynamicTable`` changed from data type 'ascii' to 'text'. +- Improved documentation and type docstrings. + +1.0.0 (September 26, 2019) +-------------------------- + +Initial release. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/hdmf_experimental_release_notes.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/hdmf_experimental_release_notes.rst new file mode 100644 index 000000000..e5f5a75d6 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/hdmf_experimental_release_notes.rst @@ -0,0 +1,35 @@ +.. _hdmf_experimental_release_notes: + +hdmf-experimental Release Notes +=============================== + +0.6.0 (January 29, 2026) +------------------------ +- Promoted ``HERD`` to a stable data type in the HDMF-common namespace. See :ref:`here ` for details. + +0.5.0 (August 4, 2023) +---------------------- +- Updates ``ExternalResources`` to have a uniform name throughout the codebase and the literature, which is now ``HERD`` + (HDMF External Resources Data). +- Fixed schema bug regarding the missing quote. 
+ +0.4.0 (June 22, 2023) +--------------------- +- In the experimental ``ExternalResources``, added a ``entity_keys`` table and removed ``keys_idx`` from the ``entities`` table. + +0.3.0 (May 3, 2023) +------------------- +- In the experimental ``ExternalResources``, added a ``files`` table, removed the ``resources`` table, and adjusted + existing columns. + +0.2.0 (January 10, 2022) +------------------------ +- In the experimental ``ExternalResources``, added ``relative_path`` field to the "objects" table dtype. This is used in + place of the previous ``field`` field representing the relative path to get to the dataset/attribute from the object. + The previous ``field`` field will be used to represent a compound type field name if the dataset/attribute is a + compound dtype. +- Updated contributors. + +0.1.0 (March 29, 2021) +---------------------- +- See the release notes for :ref:`hdmf-common 1.4.0 ` for details. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/index.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/index.rst new file mode 100644 index 000000000..5a5efcd6e --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/index.rst @@ -0,0 +1,55 @@ +Welcome to the HDMF-common Format Specification +=============================================== + +.. raw:: latex + + \part{Introduction} + +.. toctree:: + :numbered: + :maxdepth: 2 + :caption: Introduction + + format_description + +.. raw:: latex + + \part{Format Specification} + +.. toctree:: + :numbered: + :maxdepth: 3 + :caption: Format Specification + + format + +.. raw:: latex + + \part{Resources} + +.. toctree:: + :maxdepth: 2 + :caption: Resources + + software_process + +.. raw:: latex + + \part{History and Legal} + +.. toctree:: + :maxdepth: 2 + :caption: History & Legal + + hdmf_common_release_notes + hdmf_experimental_release_notes + credits + + +.. 
+ Indices and tables + ================== + + * :ref:`genindex` + * :ref:`modindex` + * :ref:`search` diff --git a/nwb-schema/2.10.0/hdmf-common-schema/docs/source/software_process.rst b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/software_process.rst new file mode 100644 index 000000000..f37fcaf98 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/docs/source/software_process.rst @@ -0,0 +1,95 @@ +Making a Pull Request +===================== + +Actions to take on each PR that modifies the schema and does not prepare the schema for a public release +(this is also in the `GitHub PR template`_): + +If the current schema version on "main" is a public release, then: + +1. Update the version string in ``docs/source/conf.py`` and ``common/namespace.yaml`` to the next version with the + suffix "-alpha" +2. Add a new section in the release notes for the new version with the date "Upcoming" + +Always: + +1. Add release notes for the PR to ``docs/source/hdmf_common_release_notes.rst`` and/or + ``docs/source/hdmf_experimental_release_notes.rst`` + +Documentation or internal changes to the repo (i.e., changes that do not affect the schema files) +do not need to be accompanied with a version bump or addition to the release notes. + +.. _`GitHub PR template`: https://github.com/hdmf-dev/hdmf-common-schema/blob/main/.github/PULL_REQUEST_TEMPLATE.md + + +Merging PRs and Making Releases +=============================== + +**Public release**: a tagged release of the schema. The version string MUST NOT have a suffix indicating a pre-release, +such as "-alpha". The current "dev" branch of HDMF and all HDMF releases MUST point to a public release of +hdmf-common-schema. All schema that use hdmf-common-schema as a submodule MUST also point only to public releases. + +**Internal release**: a state of the schema "main" branch where the version string ends with "-alpha". + +The default branch of hdmf-common-schema is "main". 
**The "main" branch holds the bleeding edge version of +the hdmf-common schema specification.** + +PRs should be made to "main". Every PR should include an update to the namespace release notes +(``docs/source/hdmf_common_release_notes.rst`` and/or ``docs/source/hdmf_experimental_release_notes.rst``). +If the current version is a public release, then the PR should also update the version of the schema in two places: +``docs/source/conf.py`` and ``common/namespace.yaml``. The new version should be the next bugfix/minor/major version +of the schema with the suffix "-alpha". For example, if the current schema on "main" has version "2.2.0", +then a PR implementing a bug fix should update the schema version from "2.2.0" to "2.2.1-alpha". Appending the "-alpha" +suffix ensures that any person or API accessing the default "main" branch of the repo containing an internal release +of the schema receives the schema with a version string that is distinct from public releases of the schema. If the +current schema on "main" is already an internal release, then the version string does not need to be updated unless +the PR requires an upgrade in the version (e.g., from bugfix to minor). + +HDMF should contain a branch and PR that tracks the "main" branch of hdmf-common-schema. Before +a public release of hdmf-common-schema is made, this HDMF branch should be checked to ensure that when the new release +is made, the branch can be merged without issue. + +Immediately prior to making a new public release, the version of the schema should be updated to remove the "-alpha" +suffix and the documentation and release notes should be updated as needed (see next section). + +The current "dev" branch of HDMF and all HDMF releases MUST always point to a public release of hdmf-common-schema. 
If +a public release contains an internally released version of hdmf-common-schema, e.g., from an untagged commit on the +"main" branch, then it will be difficult to find the version (commit) of hdmf-common-schema that was used to create +an HDMF file when the schema is not cached. + +Making a Release Checklist +========================== + +Before merging: + +1. Update requirements versions as needed +2. Update legal file dates and information in ``Legal.txt``, ``license.txt``, ``README.md``, ``docs/source/conf.py``, + and any other locations as needed +3. Update ``README.md`` as needed +4. Update the version string in ``docs/source/conf.py`` and ``common/namespace.yaml`` (remove "-alpha" suffix) +5. Update ``docs/source/conf.py`` as needed +6. Update release notes (set release date) in `docs/source/hdmf_common_release_notes.rst`, + `docs/source/hdmf_experimental_release_notes.rst`, and any other docs as needed +7. Test docs locally (``cd docs; make fulldoc``) where the hdmf-common-schema submodule in the local version of HDMF + is fully up-to-date with the head of the main branch. +8. Push changes to a new PR and make sure all PRs to be included in this release have been merged. Add + ``?template=release.md`` to the PR URL to auto-populate the PR with this checklist. +9. Check that the readthedocs build for this PR succeeds (build latest to pull the new branch, then activate and + build docs for new branch): https://readthedocs.org/projects/hdmf-common-schema/builds/ + +After merging: + +1. Create a new git tag. Pull the latest main branch locally, run ``git tag [version] --sign``, copy and paste the + release notes into the tag message, and run ``git push --tags``. +2. On the `GitHub tags`_ page, click "..." -> "Create release" for the new tag on the right side of the page. + Copy and paste the release notes into the release message, update the formatting if needed (reST to Markdown), + and set the title to the version string. +3. 
Check that the readthedocs "latest" and "stable" builds run and succeed. Delete the readthedocs build for the + merged PR. https://readthedocs.org/projects/hdmf-common-schema/builds/ +4. Update the HDMF submodule in the HDMF branch corresponding to this schema version to point to the tagged commit. + +This checklist can also be found in the `GitHub release PR template`_. + +The time between merging this PR and creating a new public release should be minimized. + +.. _`GitHub tags`: https://github.com/hdmf-dev/hdmf-common-schema/tags +.. _`GitHub release PR template`: https://github.com/hdmf-dev/hdmf-common-schema/blob/main/.github/PULL_REQUEST_TEMPLATE/release.md diff --git a/nwb-schema/2.10.0/hdmf-common-schema/hdmf-common.schema.json b/nwb-schema/2.10.0/hdmf-common-schema/hdmf-common.schema.json new file mode 100644 index 000000000..fc0f381e3 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/hdmf-common.schema.json @@ -0,0 +1,280 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "hdmf-common.schema.json", + "title": "Schema for the HDMF Common specification", + "description": "A schema for validating HDMF Common YAML Files", + "version": "1.8.0", + "type": "object", + "additionalProperties": false, + "properties": { + "groups": {"$ref": "#/definitions/groups"}, + "attributes": {"$ref": "#/definitions/attributes"}, + "datasets": {"$ref": "#/definitions/datasets"}, + "links": {"$ref": "#/definitions/links"}, + "namespaces": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "version", + "author", + "contact" + ], + "properties": { + "name": {"type": "string"}, + "version": {"type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$"}, + "doc": {"type": "string"}, + "author": {"type": "array", "items": {"type": 
"string"}}, + "contact": {"type": "array", "items": {"type": "string", "format": "email"}}, + "full_name": {"type": "string"}, + "date": {"type": "string", "format": "date"}, + "schema": { + "type": "array", + "items": { + "type": "object", + "oneOf": [ + {"required": ["namespace"]}, + {"required": ["source"]} + ], + "properties": { + "namespace": {"type": "string"}, + "doc": {"type": "string"}, + "source": {"type": "string"}, + "title": {"type": "string"}, + "data_types": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } + } + } + }, + "definitions": { + "protectedString": { + "type": "string", + "pattern": "^[A-Za-z_][A-Za-z0-9_]*$" + }, + "quantity": { + "description": "Quantity marker", + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "type": "string", + "enum": [ + "*", + "?", + "+", + "zero_or_many", + "one_or_many", + "zero_or_one" + ] + } + ] + }, + "flat_dtype": { + "description": "Required string describing the data type of the attribute", + "anyOf": [ + { + "type": "string", + "enum": [ + "float", + "float32", + "double", + "float64", + "long", + "int64", + "int", + "int32", + "int16", + "int8", + "uint", + "uint32", + "uint16", + "uint8", + "uint64", + "numeric", + "text", + "utf", + "utf8", + "utf-8", + "ascii", + "bool", + "isodatetime" + ] + }, + {"$ref": "#/definitions/ref_dtype"} + ] + }, + "dtype": { + "anyOf": [ + {"$ref": "#/definitions/flat_dtype"}, + {"$ref": "#/definitions/compound_dtype"} + ] + }, + "dims": { + "description": "Optional list describing the names of the dimensions of the data array stored by the attribute (default=None)", + "type": "array", + "items": { + "anyOf": [ + {"type": "string"}, + { + "type": "array", + "items": {"type": "string"} + } + ] + } + }, + "shape": { + "description": "Optional list describing the allowed shape(s) of the data array stored by the attribute (default=None)", + "anyOf": [ + {"$ref": "#/definitions/shape_spec"}, + { + "type": "array", + "items": {"$ref": 
"#/definitions/shape_spec"} + } + ] + }, + "shape_spec": { + "type": "array", + "items": { + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "value": null + } + ] + } + }, + "ref_dtype": { + "type": "object", + "required": ["target_type", "reftype"], + "properties": { + "target_type": { + "description": "Describes the data_type of the target that the reference points to", + "type": "string" + }, + "reftype": { + "description": "describes the kind of reference", + "type": "string", + "enum": ["ref", "reference", "object", "region"] + } + } + }, + "compound_dtype": { + "type": "array", + "items": { + "type": "object", + "required": ["name", "doc", "dtype"], + "properties": { + "name": {"$ref": "#/definitions/protectedString"}, + "doc": {"type": "string"}, + "dtype": {"$ref": "#/definitions/flat_dtype"} + } + } + }, + "groups": { + "description": "list of groups", + "type": "array", + "items": { + "title": "group", + "type": "object", + "required": ["doc"], + "additionalProperties": false, + "anyOf":[ + {"required": ["data_type_def"]}, + {"required": ["data_type_inc"]}, + {"required": ["name"]} + ], + "properties": { + "name": {"$ref": "#/definitions/protectedString"}, + "default_name": {"$ref": "#/definitions/protectedString"}, + "doc": {"type": "string"}, + "data_type_def": {"$ref": "#/definitions/protectedString"}, + "data_type_inc": {"$ref": "#/definitions/protectedString"}, + "quantity": {"$ref": "#/definitions/quantity"}, + "linkable": {"type": "boolean"}, + "datasets": {"$ref": "#/definitions/datasets"}, + "links": {"$ref": "#/definitions/links"}, + "groups": {"$ref": "#/definitions/groups"}, + "attributes": {"$ref": "#/definitions/attributes"} + } + } + }, + "attributes": { + "type": "array", + "items": { + "title": "attribute", + "type": "object", + "required": ["doc"], + "additionalProperties": false, + "properties": { + "dtype": {"$ref": "#/definitions/dtype"}, + "dims": {"$ref": "#/definitions/dims"}, + "shape": {"$ref": 
"#/definitions/shape"}, + "name": {"type": "string"}, + "doc": {"type": "string"}, + "required": {"type": "boolean"}, + "value": {"description": "Optional constant, fixed value for the attribute."}, + "default_value": {"description": "Optional default value for variable-valued attributes."} + } + } + }, + "links": { + "type": "array", + "items": { + "title": "link", + "type": "object", + "required": ["target_type", "doc"], + "additionalProperties": false, + "properties": { + "name": {"type": "string"}, + "doc": {"type": "string"}, + "target_type": {"type": "string"}, + "quantity": {"$ref": "#/definitions/quantity"} + } + } + }, + "datasets": { + "type": "array", + "items": { + "title": "dataset", + "type": "object", + "required": ["doc"], + "additionalProperties": false, + "anyOf":[ + {"required": ["data_type_def"]}, + {"required": ["data_type_inc"]}, + {"required": ["name"]} + ], + "properties": { + "name": {"$ref": "#/definitions/protectedString"}, + "default_name": {"$ref": "#/definitions/protectedString"}, + "doc": {"type": "string"}, + "dtype": {"$ref": "#/definitions/dtype"}, + "dims": {"$ref": "#/definitions/dims"}, + "shape": {"$ref": "#/definitions/shape"}, + "data_type_def": {"$ref": "#/definitions/protectedString"}, + "data_type_inc": {"$ref": "#/definitions/protectedString"}, + "quantity": {"$ref": "#/definitions/quantity"}, + "linkable": {"type": "boolean"}, + "attributes": {"$ref": "#/definitions/attributes"}, + "value": {"description": "Optional constant, fixed value for the attribute."}, + "default_value": {"description": "Optional default value for variable-valued attributes."} + } + } + } + } +} diff --git a/nwb-schema/2.10.0/hdmf-common-schema/license.txt b/nwb-schema/2.10.0/hdmf-common-schema/license.txt new file mode 100644 index 000000000..6468e131d --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/license.txt @@ -0,0 +1,13 @@ +“hdmf-common-schema” Copyright (c) 2019-2026, The Regents of the University of California, through Lawrence 
Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +(1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +(2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +(3) Neither the name of the University of California, Lawrence Berkeley National Laboratory, U.S. Dept. of Energy nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades to the features, functionality or performance of the source code ("Enhancements") to anyone; however, if you choose to make your Enhancements available either publicly, or directly to Lawrence Berkeley National Laboratory, without imposing a separate written license agreement for such Enhancements, then you hereby grant the following license: a non-exclusive, royalty-free perpetual license to install, use, modify, prepare derivative works, incorporate into other computer software, distribute, and sublicense such enhancements or derivative works thereof, in binary and source code form. diff --git a/nwb-schema/2.10.0/hdmf-common-schema/requirements-doc.txt b/nwb-schema/2.10.0/hdmf-common-schema/requirements-doc.txt new file mode 100644 index 000000000..9d45a43f8 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/requirements-doc.txt @@ -0,0 +1 @@ +hdmf-docutils diff --git a/nwb-schema/2.10.0/hdmf-common-schema/setup.cfg b/nwb-schema/2.10.0/hdmf-common-schema/setup.cfg new file mode 100644 index 000000000..990c58ed6 --- /dev/null +++ b/nwb-schema/2.10.0/hdmf-common-schema/setup.cfg @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 120 +max-complexity = 17 +exclude = + .git, + .tox, + __pycache__, + build/, + docs/source/conf.py