Added JENDL nuclear data #3

Merged 13 commits on Feb 23, 2019
2 changes: 1 addition & 1 deletion README.md
@@ -2,7 +2,7 @@

This repository contains a collection of scripts for generating HDF5 data
libraries that can be used with OpenMC. Some of these scripts convert existing
ACE libraries (such as those produced by LANL) whereas others use NJOY to
ACE libraries (such as those produced by LANL) whereas the generate scripts use NJOY to
process ENDF files directly. Note that unless you are interested in making a
customized library, you can find pregenerated HDF5 libraries at
https://openmc.mcs.anl.gov.
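As a rough illustration of the ACE conversion path described above, here is a minimal sketch using the openmc.data API (the ACE file name is a placeholder, and this is not the exact code of any one script in the repository):

# Minimal sketch: convert a single ACE table to OpenMC's HDF5 format.
# 'Fe56.ace' is a placeholder input path, not a file provided by this repo.
import openmc.data

nuclide = openmc.data.IncidentNeutron.from_ace('Fe56.ace')
nuclide.export_to_hdf5(nuclide.name + '.h5', 'w')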
17 changes: 5 additions & 12 deletions convert_fendl.py
@@ -1,21 +1,16 @@
#!/usr/bin/env python3

import os
from collections import defaultdict
import sys
import tarfile
import zipfile
import glob
import argparse
import glob
import os
import ssl
import subprocess
from string import digits
from urllib.request import urlopen, Request
import sys
import zipfile

import openmc.data
from openmc._utils import download


description = """
Download FENDL 3.1d or FENDL 3.1c ACE data from the IAEA and convert it to an HDF5 library for
use with OpenMC.
@@ -49,7 +44,7 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
library_name = 'fendl'
ace_files_dir = '-'.join([library_name, args.release, 'ace'])
# the destination is decided after the release is known to avoid putting the release in a folder with a misleading name
if args.destination == None:
if args.destination is None:
    args.destination = '-'.join([library_name, args.release, 'hdf5'])

# This dictionary contains all the unique information about each release. This can be extended to accommodate new releases
@@ -82,8 +77,6 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
if response.lower().startswith('n'):
    sys.exit()

block_size = 16384

# ==============================================================================
# DOWNLOAD FILES FROM IAEA SITE

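The convert_fendl.py hunks above keep the pattern, shared by the convert scripts, of choosing the output directory only after the release is known; a minimal sketch of that argument handling (illustrative only, with a trimmed argument list and an example default release):

# Sketch of the shared destination-default logic (illustrative, not the
# script's full argument list; '3.1d' is just an example release string).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-r', '--release', default='3.1d')
parser.add_argument('-d', '--destination', default=None)
args = parser.parse_args()

library_name = 'fendl'
ace_files_dir = '-'.join([library_name, args.release, 'ace'])
if args.destination is None:  # 'is None' rather than '== None', as in the diff
    args.destination = '-'.join([library_name, args.release, 'hdf5'])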
45 changes: 6 additions & 39 deletions convert_jeff32.py
@@ -1,17 +1,16 @@
#!/usr/bin/env python3

import argparse
import glob
import os
from collections import defaultdict
import sys
import tarfile
import zipfile
import glob
import argparse
from collections import defaultdict
from string import digits
from urllib.request import urlopen

import openmc.data

from openmc._utils import download

description = """
Download JEFF 3.2 ACE data from OECD/NEA and convert it to a multi-temperature
@@ -64,47 +63,15 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
'JEFF32-ACE-1800K.tar.gz',
'TSLs.tar.gz']

block_size = 16384

# ==============================================================================
# DOWNLOAD FILES FROM OECD SITE

files_complete = []
for f in files:
    # Establish connection to URL
    url = base_url + f
    req = urlopen(url)

    # Get file size from header
    if sys.version_info[0] < 3:
        file_size = int(req.info().getheaders('Content-Length')[0])
    else:
        file_size = req.length
    downloaded = 0

    # Check if file already downloaded
    if os.path.exists(f):
        if os.path.getsize(f) == file_size:
            print('Skipping {}, already downloaded'.format(f))
            files_complete.append(f)
            continue
        else:
            overwrite = input('Overwrite {}? ([y]/n) '.format(f))
            if overwrite.lower().startswith('n'):
                continue

    # Copy file to disk
    print('Downloading {}... '.format(f), end='')
    with open(f, 'wb') as fh:
        while True:
            chunk = req.read(block_size)
            if not chunk: break
            fh.write(chunk)
            downloaded += len(chunk)
            status = '{:10} [{:3.2f}%]'.format(downloaded, downloaded * 100. / file_size)
            print(status + chr(8)*len(status), end='')
    print('')
    files_complete.append(f)
    downloaded_file = download(url)
    files_complete.append(f)

# ==============================================================================
# EXTRACT FILES FROM TGZ
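For reference, the chunked urlopen loop removed above is roughly what a helper like openmc._utils.download wraps up; a hedged sketch of that behaviour (the real helper may differ in details such as overwrite prompting and progress output):

# Hedged sketch of what the removed manual loop did, expressed as a helper:
# stream the response to disk in fixed-size chunks and return the filename.
import os
from urllib.request import urlopen

def download_sketch(url, block_size=16384):
    filename = os.path.basename(url)
    with urlopen(url) as req, open(filename, 'wb') as fh:
        while True:
            chunk = req.read(block_size)
            if not chunk:
                break
            fh.write(chunk)
    return filename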
41 changes: 6 additions & 35 deletions convert_nndc71.py
@@ -6,17 +6,17 @@
used for OpenMC's regression test suite.
"""

import argparse
import glob
import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
import glob
import hashlib
import argparse
from urllib.request import urlopen

import openmc.data
from openmc._utils import download


class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
@@ -42,7 +42,6 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
'ENDF-B-VII.1-tsl.tar.gz']
checksums = ['9729a17eb62b75f285d8a7628ace1449',
'e17d827c92940a30f22f096d910ea186']
block_size = 16384

# ==============================================================================
# DOWNLOAD FILES FROM NNDC SITE
@@ -51,36 +50,8 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
for f in files:
    # Establish connection to URL
    url = base_url + f
    req = urlopen(url)

    # Get file size from header
    file_size = req.length
    downloaded = 0

    # Check if file already downloaded
    if os.path.exists(f):
        if os.path.getsize(f) == file_size:
            print('Skipping ' + f)
            files_complete.append(f)
            continue
        else:
            overwrite = input('Overwrite {}? ([y]/n) '.format(f))
            if overwrite.lower().startswith('n'):
                continue

    # Copy file to disk
    print('Downloading {}... '.format(f), end='')
    with open(f, 'wb') as fh:
        while True:
            chunk = req.read(block_size)
            if not chunk: break
            fh.write(chunk)
            downloaded += len(chunk)
            status = '{0:10} [{1:3.2f}%]'.format(
                downloaded, downloaded * 100. / file_size)
            print(status + chr(8)*len(status), end='')
    print('')
    files_complete.append(f)
    downloaded_file = download(url)
    files_complete.append(f)

# ==============================================================================
# VERIFY MD5 CHECKSUMS
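The MD5 verification step that follows this hunk can be done with hashlib, which convert_nndc71.py already imports; a minimal sketch, assuming each downloaded tarball is checked against the checksums listed earlier in the file:

# Minimal MD5 verification sketch (assumes files and checksums pair up in
# order, as the lists earlier in convert_nndc71.py suggest).
import hashlib

files = ['ENDF-B-VII.1-neutron-293.6K.tar.gz',
         'ENDF-B-VII.1-tsl.tar.gz']
checksums = ['9729a17eb62b75f285d8a7628ace1449',
             'e17d827c92940a30f22f096d910ea186']

def md5sum(path, block_size=16384):
    md5 = hashlib.md5()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(block_size), b''):
            md5.update(chunk)
    return md5.hexdigest()

for f, checksum in zip(files, checksums):
    if md5sum(f) != checksum:
        raise IOError('MD5 checksum mismatch for ' + f)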
13 changes: 3 additions & 10 deletions convert_tendl.py
@@ -1,19 +1,14 @@
#!/usr/bin/env python3

import argparse
import glob
import os
from collections import defaultdict
import sys
import tarfile
import zipfile
import glob
import argparse
from string import digits
from urllib.request import urlopen

import openmc.data
from openmc._utils import download


description = """
Download TENDL 2017 or TENDL 2015 ACE data from PSI and convert it to an HDF5 library for
use with OpenMC.
@@ -48,7 +43,7 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
library_name = 'tendl' #this could be added as an argument to allow different libraries to be downloaded
ace_files_dir = '-'.join([library_name, args.release, 'ace'])
# the destination is decided after the release is known to avoid putting the release in a folder with a misleading name
if args.destination == None:
if args.destination is None:
    args.destination = '-'.join([library_name, args.release, 'hdf5'])

# This dictionary contains all the unique information about each release. This can be extended to accommodate new releases
@@ -83,8 +78,6 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
if response.lower().startswith('n'):
    sys.exit()

block_size = 16384

# ==============================================================================
# DOWNLOAD FILES FROM WEBSITE

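convert_tendl.py imports both tarfile and zipfile, so the downloaded archives are presumably unpacked by type before the ACE-to-HDF5 conversion; a hedged sketch of that extraction step (the file list and directory name are placeholders following the diff's naming, not the script's exact code):

# Hedged extraction sketch: unpack each downloaded archive into the ACE
# directory, handling tarballs and zip files (both modules are imported).
import tarfile
import zipfile

files_complete = ['tendl-archive.tar.gz']   # placeholder list of downloads
ace_files_dir = 'tendl-2017-ace'            # placeholder output directory

for f in files_complete:
    print('Extracting {}...'.format(f))
    if tarfile.is_tarfile(f):
        with tarfile.open(f, 'r') as tgz:
            tgz.extractall(path=ace_files_dir)
    elif zipfile.is_zipfile(f):
        with zipfile.ZipFile(f) as zf:
            zf.extractall(path=ace_files_dir)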
6 changes: 6 additions & 0 deletions download_all.sh
@@ -0,0 +1,6 @@
python3 data/convert_fendl.py
python3 data/convert_jeff32.py
python3 data/convert_nndc71.py
python3 data/convert_tendl.py
python3 data/generate_endf71.py
python3 data/generate_jendl.py
7 changes: 3 additions & 4 deletions generate_endf71.py
@@ -1,21 +1,20 @@
#!/usr/bin/env python3

import argparse
from multiprocessing import Pool
import os
from pathlib import Path
import shutil
import sys
import tarfile
import tempfile
from urllib.parse import urljoin
import warnings
import zipfile
from multiprocessing import Pool
from pathlib import Path
from urllib.parse import urljoin

import openmc.data
from openmc._utils import download


# Make sure Python version is sufficient
assert sys.version_info >= (3, 6), "Python 3.6+ is required"

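generate_endf71.py keeps its multiprocessing.Pool import in the reordered block above, which suggests the NJOY-driven conversions are run in parallel; a hedged sketch of that pattern (the worker function, the glob pattern, and the output naming are placeholders, not the script's actual code):

# Hedged sketch: convert ENDF files through NJOY in parallel with a Pool.
# 'endf/*.endf' is a placeholder glob for the downloaded ENDF files.
import glob
from multiprocessing import Pool

import openmc.data

def process(filename):
    data = openmc.data.IncidentNeutron.from_njoy(filename)
    data.export_to_hdf5(data.name + '.h5', 'w')
    return data.name

if __name__ == '__main__':
    endf_files = sorted(glob.glob('endf/*.endf'))
    with Pool() as pool:
        converted = pool.map(process, endf_files)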
125 changes: 125 additions & 0 deletions generate_jendl.py
@@ -0,0 +1,125 @@
#!/usr/bin/env python3

import argparse
import glob
import os
import sys
import tarfile

import openmc.data
from openmc._utils import download

description = """
Download JENDL 4.0 data from JAEA and convert it to an HDF5 library for
use with OpenMC.

"""


class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    pass


parser = argparse.ArgumentParser(
    description=description,
    formatter_class=CustomFormatter
)
parser.add_argument('-b', '--batch', action='store_true',
                    help='suppresses standard input prompts')
parser.add_argument('-d', '--destination', default=None,
                    help='Directory to create new library in')
parser.add_argument('--libver', choices=['earliest', 'latest'],
                    default='latest', help="Output HDF5 versioning. Use "
                    "'earliest' for backwards compatibility or 'latest' for "
                    "performance")
parser.add_argument('-r', '--release', choices=['4.0'],
                    default='4.0', help="The nuclear data library release version. "
                    "The only option currently supported is 4.0")
args = parser.parse_args()



library_name = 'jendl' #this could be added as an argument to allow different libraries to be downloaded
endf_files_dir = '-'.join([library_name, args.release, 'endf'])
# the destination is decided after the release is known to avoid putting the release in a folder with a misleading name
if args.destination is None:
args.destination = '-'.join([library_name, args.release, 'hdf5'])

# This dictionary contains all the unique information about each release. This can be extended to accommodate new releases
release_details = {
    '4.0': {
        'base_url': 'https://wwwndc.jaea.go.jp/ftpnd/ftp/JENDL/',
        'files': ['jendl40-or-up_20160106.tar.gz'],
        'neutron_files': os.path.join(endf_files_dir, 'jendl40-or-up_20160106', '*.dat'),
        'metastables': os.path.join(endf_files_dir, 'jendl40-or-up_20160106', '*m.dat'),
        'compressed_file_size': '0.2 GB',
        'uncompressed_file_size': '2 GB'
    }
}

download_warning = """
WARNING: This script will download {} of data.
Extracting and processing the data requires {} of additional free disk space.

Are you sure you want to continue? ([y]/n)
""".format(release_details[args.release]['compressed_file_size'],
release_details[args.release]['uncompressed_file_size'])

response = input(download_warning) if not args.batch else 'y'
if response.lower().startswith('n'):
    sys.exit()

# ==============================================================================
# DOWNLOAD FILES FROM WEBSITE

files_complete = []
for f in release_details[args.release]['files']:
    # Establish connection to URL
    url = release_details[args.release]['base_url'] + f
    downloaded_file = download(url)
    files_complete.append(downloaded_file)

# ==============================================================================
# EXTRACT FILES FROM TGZ

for f in release_details[args.release]['files']:
    if f not in files_complete:
        continue

    # Extract files

    suffix = ''
    with tarfile.open(f, 'r') as tgz:
        print('Extracting {0}...'.format(f))
        tgz.extractall(path=os.path.join(endf_files_dir, suffix))


# ==============================================================================
# GENERATE HDF5 LIBRARY -- NEUTRON FILES

# Get a list of all ACE files
neutron_files = glob.glob(release_details[args.release]['neutron_files'])

# Create output directory if it doesn't exist
if not os.path.isdir(args.destination):
    os.mkdir(args.destination)

library = openmc.data.DataLibrary()

for filename in sorted(neutron_files):

    print('Converting: ' + filename)
    data = openmc.data.IncidentNeutron.from_njoy(filename)

    # Export HDF5 file
    h5_file = os.path.join(args.destination, data.name + '.h5')
    print('Writing {}...'.format(h5_file))
    data.export_to_hdf5(h5_file, 'w', libver=args.libver)

    # Register with library
    library.register_file(h5_file)

# Write cross_sections.xml
libpath = os.path.join(args.destination, 'cross_sections.xml')
library.export_to_xml(libpath)
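Once cross_sections.xml has been written, an OpenMC run can be pointed at the new library; a minimal usage sketch (setting the OPENMC_CROSS_SECTIONS environment variable to the same path is the usual alternative to the attribute shown here):

# Usage sketch: point an OpenMC model at the generated JENDL library.
# Alternatively: export OPENMC_CROSS_SECTIONS=jendl-4.0-hdf5/cross_sections.xml
import openmc

materials = openmc.Materials()
materials.cross_sections = 'jendl-4.0-hdf5/cross_sections.xml'
materials.export_to_xml()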