Multiresolution Parallel Programming with Chapel


Multiresolution Parallel Programming with Chapel. Vassily Litvinov and the Chapel team, Cray Inc. HPC Advisory Council Conference, 13 September 2012.

Outline: Motivation: Programming Models • Multiresolution Programming • Empirical Evaluation • About the Project

Motivation: a generation of Cray systems and the programming models used on them.

1 GF, 1988: Cray Y-MP; 8 processors
• Static finite element analysis
• Fortran77 + Cray autotasking + vectorization

1 TF, 1998: Cray T3E; 1,024 processors
• Modeling of metallic magnet atoms
• Fortran + MPI (Message Passing Interface)

1 PF, 2008: Cray XT5; 150,000 processors
• Superconductive materials
• C++/Fortran + MPI + vectorization

1 EF, ~2018: Cray ____; ~10,000,000 processors
• TBD
• TBD: C/C++/Fortran + MPI + CUDA/OpenCL/OpenMP/OpenACC, or perhaps something completely different?

The running example: STREAM Triad.
Given: m-element vectors A, B, C.
Compute: forall i in 1..m, A_i = B_i + alpha * C_i.
In pictures: A = B + alpha * C, element by element; first serially, then in parallel, then in parallel with distributed memory, and finally in parallel with distributed memory and multicore nodes.

STREAM Triad: MPI

#include <hpcc.h>

static int VectorSize;
static double *a, *b, *c;

int HPCC_StarStream(HPCC_Params *params) {
  int myRank, commSize;
  int rv, errCount;
  MPI_Comm comm = MPI_COMM_WORLD;

  MPI_Comm_size( comm, &commSize );
  MPI_Comm_rank( comm, &myRank );

  rv = HPCC_Stream( params, 0 == myRank);
  MPI_Reduce( &rv, &errCount, 1, MPI_INT, MPI_SUM, 0, comm );

  return errCount;
}

int HPCC_Stream(HPCC_Params *params, int doIO) {
  register int j;
  double scalar;

  VectorSize = HPCC_LocalVectorSize( params, 3, sizeof(double), 0 );

  a = HPCC_XMALLOC( double, VectorSize );
  b = HPCC_XMALLOC( double, VectorSize );
  c = HPCC_XMALLOC( double, VectorSize );

  if (!a || !b || !c) {
    if (c) HPCC_free(c);
    if (b) HPCC_free(b);
    if (a) HPCC_free(a);
    if (doIO) {
      fprintf( outFile, "Failed to allocate memory (%d).\n", VectorSize );
      fclose( outFile );
    }
    return 1;
  }

  for (j=0; j<VectorSize; j++) {
    b[j] = 2.0;
    c[j] = 3.0;
  }

  scalar = 3.0;

  for (j=0; j<VectorSize; j++)
    a[j] = b[j]+scalar*c[j];

  HPCC_free(c);
  HPCC_free(b);
  HPCC_free(a);

  return 0;
}

STREAM Triad: MPI + OpenMP
Identical to the MPI version except for the OpenMP header include

#ifdef _OPENMP
#include <omp.h>
#endif

and a parallel-for pragma ahead of both the initialization loop and the triad loop:

#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<VectorSize; j++)
  a[j] = b[j]+scalar*c[j];

STREAM Triad: CUDA

#define N 2000000

int main() {
  float *d_a, *d_b, *d_c;
  float scalar;

  cudaMalloc((void**)&d_a, sizeof(float)*N);
  cudaMalloc((void**)&d_b, sizeof(float)*N);
  cudaMalloc((void**)&d_c, sizeof(float)*N);

  dim3 dimBlock(128);
  dim3 dimGrid(N/dimBlock.x );
  if( N % dimBlock.x != 0 ) dimGrid.x+=1;

  set_array<<<dimGrid,dimBlock>>>(d_b, 2.0f, N);
  set_array<<<dimGrid,dimBlock>>>(d_c, 3.0f, N);

  scalar=3.0f;
  STREAM_Triad<<<dimGrid,dimBlock>>>(d_b, d_c, d_a, scalar, N);
  cudaThreadSynchronize();

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
}

__global__ void set_array(float *a, float value, int len) {
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < len) a[idx] = value;
}

__global__ void STREAM_Triad( float *a, float *b, float *c,
                              float scalar, int len) {
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < len) c[idx] = a[idx]+scalar*b[idx];
}

Where is programmer productivity?

Examples:

Type of HW parallelism               Programming model       Unit of parallelism
Inter-node                           MPI                     process
Intra-node/multicore                 OpenMP / pthreads       iteration/task
Instruction-level vectors/threads    pragmas                 iteration
GPU/accelerator                      CUDA/OpenCL/OpenACC     SIMD function/task

HPC has traditionally given users low-level, control-centric programming models: ones that are closely tied to the underlying hardware and that support only a single type of parallelism.
Benefits: lots of control; decent generality; easy to implement.
Downsides: lots of user-managed detail; brittle to changes.

STREAM Triad: Chapel

config const m = 1000,
             alpha = 3.0;

const ProblemSpace = [1..m] dmapped …;   // the special sauce

var A, B, C: [ProblemSpace] real;

B = 2.0;
C = 3.0;

A = B + alpha * C;

Philosophy: Good language design can tease details of locality and parallelism away from an algorithm, permitting the compiler, runtime, applied scientist, and HPC expert each to focus on their strengths.

Chapel: a parallel language that has emerged from DARPA HPCS
• general parallelism: data-, task-, and nested parallelism; highly dynamic multithreading or static SPMD-style
• multiresolution philosophy: high-level features built on low-level ones to provide "manual overrides" and to support a separation of concerns (application vs. parallel experts)
• locality control: explicit or data-driven placement of data and tasks; locality expressed distinctly from parallelism
• features for productivity: type inference, iterators, rich array types
• portable: designed and implemented to support diverse systems
• open source: developed and distributed under the BSD license
• plausibly adoptable: forward-thinking HPC users want a mature version

Outline: Motivation • Multiresolution Programming • Empirical Evaluation • About the Project

1) General Parallel Programming  2) Global-View Abstractions  3) Multiresolution
Recommended publications
  • Introduction to OpenACC 2018 HPC Workshop: Parallel Programming
    Introduction to OpenACC. 2018 HPC Workshop: Parallel Programming. Alexander B. Pacheco, Research Computing, July 17-18, 2018.
    CPU vs GPU: a CPU consists of a few cores optimized for sequential serial processing, whereas a GPU has a massively parallel architecture consisting of thousands of smaller, more efficient cores designed for handling multiple tasks simultaneously. The slides list GPU-enabled applications and go on to cover accelerating an application for the GPU, GPU-accelerated libraries, and GPU programming languages.
    What is OpenACC? The OpenACC Application Program Interface describes a collection of compiler directives used to mark loops and regions of code in standard C, C++ and Fortran for offloading from a host CPU to an attached accelerator, and it provides portability across operating systems, host CPUs and accelerators.
    History: OpenACC was developed by The Portland Group (PGI), Cray, CAPS and NVIDIA. PGI, Cray, and CAPS have spent over two years developing and shipping commercial compilers that use directives to enable GPU acceleration as core technology. A minimal C sketch of such a directive follows below.
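    To make the directive idea concrete, here is a minimal sketch (not taken from the workshop slides) of the STREAM Triad kernel from the deck above, offloaded with a single OpenACC directive; the function name and data clauses are illustrative.

        /* STREAM Triad with OpenACC: one pragma marks the loop for offload;
           copyin/copyout clauses move the vectors between host and accelerator. */
        void stream_triad(int m, float alpha, const float *b, const float *c, float *a)
        {
            #pragma acc parallel loop copyin(b[0:m], c[0:m]) copyout(a[0:m])
            for (int i = 0; i < m; i++)
                a[i] = b[i] + alpha * c[i];
        }

    The loop body is unchanged from the serial version, which is the productivity argument the workshop makes.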
  • GPU Computing with OpenACC Directives
    Introduction to OpenACC Directives. Duncan Poole, NVIDIA; Thomas Bradley, NVIDIA.
    GPUs are reaching a broader set of developers: from early adopters in the 100,000s around 2004 (oil & gas, defense, weather, research, climate, plasma physics) to developers in the 1,000,000s today across CAE, CFD, finance, rendering, data analytics, life sciences, universities and supercomputing centers.
    There are three ways to accelerate applications: libraries ("drop-in" acceleration), OpenACC directives (easily accelerate applications), and programming languages (maximum flexibility); CUDA libraries and CUDA languages are interoperable with OpenACC. GPU-accelerated libraries include NVIDIA cuBLAS, cuRAND, cuSPARSE, NPP and cuFFT, vector signal and image processing, GPU-accelerated linear algebra, matrix algebra on GPU and multicore, sparse linear algebra and building-block algorithms for CUDA, C++ STL features for CUDA, and the IMSL library.
    OpenACC directives are simple compiler hints: the compiler parallelizes the code, and the same directives work on many-core GPUs and multicore CPUs. The excerpt's Fortran fragment reads:

        Program myscience
          ... serial code ...
          !$acc kernels
          do k = 1,n1
            do i = 1,n2
              ... parallel code ...
            enddo
          enddo
          !$acc end kernels
          ...
        End Program myscience

    A hedged C analogue of this kernels construct follows below.
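    The following is a hedged C analogue of the Fortran kernels region above (the array shape, bounds and function name are illustrative, not from the slides); the compiler turns the loop nest inside the region into one or more accelerator kernels.

        void scale_grid(int n1, int n2, float a[n1][n2], float factor)
        {
            #pragma acc kernels
            {
                /* the compiler analyzes this region and generates accelerator
                   kernels for the loop nest it finds inside */
                for (int k = 0; k < n1; k++)
                    for (int i = 0; i < n2; i++)
                        a[k][i] = factor * a[k][i];
            }
        }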
  • OpenACC Course October 2017. Lecture 1 Q&As
    OpenACC Course October 2017. Lecture 1 Q&As.
    Q: I am currently working on accelerating code compiled with gcc; in your experience, should I grab gcc-7 or try to compile the code with the PGI C compiler?
    A: The GCC compilers are lagging behind the PGI compilers in terms of OpenACC feature implementation, so I'd recommend the PGI compilers. PGI provides a community edition, which is free and can be used to compile OpenACC codes.
    Q: New to OpenACC. Other than the PGI compiler, what other compilers support OpenACC?
    A: You can find all the supported compilers at https://www.openacc.org/tools
    Q: Does it support the NVIDIA GPU on TK1?
    A: Currently there isn't an OpenACC implementation that supports ARM processors, including TK1. PGI is considering adding this support sometime in the future, but for now you'll need to use CUDA.
    Q: Do OpenACC directives work with the Intel Xeon Phi platform?
    A: Xeon Phi is treated as a multicore x86 CPU. With PGI you can use the "-ta=multicore" flag to target multicore CPUs when using OpenACC.
    Q: Does it have any application in the field of software engineering?
    A: In my opinion OpenACC enables good software engineering practices by letting you write a single source code, which is more maintainable than having to maintain separate code bases for CPUs, GPUs, and so on.
    Q: Do the CPU comparisons include SIMD vectorizations?
    A: I think the CPU comparisons include the standard vectorisation that the PGI compiler applies, but no hand-coded vectorisation optimisations or intrinsics work.
    Q: Does OpenMP also enable parallelization on GPUs?
    A: OpenMP does have support for GPUs, but the compilers are just now becoming available.
    (A hedged sketch contrasting the OpenACC and OpenMP spellings of an offloaded loop follows below.)
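    As a hedged illustration of the last answer, here is the OpenMP device-offload spelling next to the OpenACC spelling of the same loop; the function names and clauses are illustrative, not taken from the course.

        /* OpenACC version of a saxpy loop */
        void saxpy_acc(int n, float a, const float *x, float *y)
        {
            #pragma acc parallel loop copyin(x[0:n]) copy(y[0:n])
            for (int i = 0; i < n; i++)
                y[i] = a * x[i] + y[i];
        }

        /* OpenMP 4.5-style target-offload version of the same loop */
        void saxpy_omp(int n, float a, const float *x, float *y)
        {
            #pragma omp target teams distribute parallel for map(to: x[0:n]) map(tofrom: y[0:n])
            for (int i = 0; i < n; i++)
                y[i] = a * x[i] + y[i];
        }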
  • Multi-Threaded GPU Acceleration of ORBIT with Minimal Code Modifications
    Princeton Plasma Physics Laboratory, PPPL-4996: Multi-threaded GPU Acceleration of ORBIT with Minimal Code Modifications. Ante Qu, Stephane Ethier, Eliot Feibush and Roscoe White. February 2014. Prepared for the U.S. Department of Energy under Contract DE-AC02-09CH11466.
    Princeton Plasma Physics Laboratory report disclaimers. Full legal disclaimer: This report was prepared as an account of work sponsored by an agency of the United States Government. Neither the United States Government nor any agency thereof, nor any of their employees, nor any of their contractors, subcontractors or their employees, makes any warranty, express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or any third party's use or the results of such use of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise, does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or any agency thereof or its contractors or subcontractors. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
    Trademark disclaimer: Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise, does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or any agency thereof or its contractors or subcontractors.
    PPPL report availability: Princeton Plasma Physics Laboratory, http://www.pppl.gov/techreports.cfm; Office of Scientific and Technical Information (OSTI), http://www.osti.gov/bridge. Related links: U.S.
  • HPVM: Heterogeneous Parallel Virtual Machine
    HPVM: Heterogeneous Parallel Virtual Machine. Maria Kotsifakou, Prakalp Srivastava, Matthew D. Sinclair, Vikram Adve and Sarita Adve (Department of Computer Science, University of Illinois at Urbana-Champaign) and Rakesh Komuravelli (Qualcomm Technologies Inc.).
    Abstract: We propose a parallel program representation for heterogeneous systems, designed to enable performance portability across a wide range of popular parallel hardware, including GPUs, vector instruction sets, multicore CPUs and potentially FPGAs. Our representation, which we call HPVM, is a hierarchical dataflow graph with shared memory and vector instructions. HPVM supports three important capabilities for programming heterogeneous systems: a compiler intermediate representation (IR), a virtual instruction set (ISA), and a basis for runtime scheduling; previous systems focus on only one of these capabilities. As a compiler IR, HPVM aims to enable effective code generation and optimization for heterogeneous systems […] hardware, and that runtime scheduling policies can make use of both program and runtime information to exploit the flexible compilation capabilities. Overall, we conclude that the HPVM representation is a promising basis for achieving performance portability and for implementing parallelizing compilers for heterogeneous parallel systems.
    CCS Concepts: Computer systems organization → Heterogeneous (hybrid) systems. Keywords: Virtual ISA, Compiler, Parallel IR, Heterogeneous Systems, GPU, Vector SIMD.
  • OpenACC Getting Started Guide
    OpenACC Getting Started Guide, Version 2018. Table of contents:
    Chapter 1. Overview: 1.1 Terms and Definitions; 1.2 System Prerequisites; 1.3 Prepare Your System; 1.4 Supporting Documentation and Examples.
    Chapter 2. Using OpenACC with the PGI Compilers: 2.1 OpenACC Directive Summary; 2.2 CUDA Toolkit Versions; 2.3 C Structs in OpenACC; 2.4 C++ Classes in OpenACC; 2.5 Fortran Derived Types in OpenACC; 2.6 Fortran I/O (2.6.1 OpenACC PRINT Example); 2.7 OpenACC Atomic Support.
    (A hedged sketch of the "C Structs in OpenACC" pattern follows below.)
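    As a hedged sketch of what the "C Structs in OpenACC" chapter heading suggests (the type, size and function are illustrative, not taken from the guide): a struct whose members are fixed-size arrays is a single contiguous object, so an ordinary data clause can move it, whereas pointer-valued members would require a manual deep copy with enter data directives.

        #define NPTS 1024

        typedef struct { float x[NPTS]; float y[NPTS]; } vectors_t;

        void scale_struct(vectors_t *v, float s)
        {
            /* copy(v[0:1]) transfers the whole struct, including both member arrays */
            #pragma acc parallel loop copy(v[0:1])
            for (int i = 0; i < NPTS; i++)
                v->y[i] = s * v->x[i];
        }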
  • Introduction to GPU Programming with CUDA and OpenACC
    Introduction to GPU Programming with CUDA and OpenACC. Alabama Supercomputer Center, Alabama Research and Education Network.
    Topics: why GPU chips and CUDA; GPU chip architecture overview; CUDA programming; queue system commands; other GPU programming options; OpenACC programming; comparing GPUs to other processors.
    What is a GPU chip? A Graphics Processing Unit (GPU) chip is an adaptation of the technology in a video rendering chip to be used as a math coprocessor. The earliest graphics cards simply mapped memory bytes to screen pixels (e.g., the Apple ][ in 1980). The next generation of graphics cards (1990s) had 2D rendering capabilities for rendering lines and shaded areas. Graphics cards started accelerating 3D rendering with standards like OpenGL and DirectX in the early 2000s. The most recent graphics cards have programmable processors, so that game physics can be offloaded from the main processor to the GPU. A series of GPU chips sometimes called GPGPU (General Purpose GPU) have double precision capability so that they can be used as math coprocessors.
    Why GPUs? Comparison of peak theoretical GFLOPs and memory bandwidth for NVIDIA GPUs and Intel CPUs over the past few years (graphs from the NVIDIA CUDA C Programming Guide 4.0).
    The CUDA programming language: GPU chips are massively multithreaded, manycore SIMD (Single Instruction Multiple Data) processors. Previously the chips were programmed using standard graphics APIs (DirectX, OpenGL). CUDA, an extension of C, is the most popular GPU programming language; CUDA can also be called from a C++ program. The CUDA standard has no Fortran support, but The Portland Group sells a third-party CUDA Fortran.
  • Investigation of the OpenCL SYCL Programming Model
    Investigation of the OpenCL SYCL Programming Model. Angelos Trigkas, August 22, 2014. MSc in High Performance Computing, The University of Edinburgh, year of presentation 2014.
    Abstract: OpenCL SYCL is a new heterogeneous and parallel programming framework created by the Khronos Group that tries to bring OpenCL programming into C++. In particular, it enables C++ developers to create OpenCL kernels using all the popular C++ features, such as classes, inheritance and templates. What is more, it dramatically reduces programming effort and complexity by letting the runtime system handle system resources and data transfers. In this project we investigate Syclone, a prototype SYCL implementation that we acquired from Codeplay Ltd. To do that, we compare SYCL, in terms of both programmability and performance, with two other established parallel programming models, OpenCL and OpenMP. In particular, we have chosen six scientific applications, ported them to all three models, and run benchmarking experiments to assess their performance on an Intel Xeon Phi based platform. The results show that SYCL does well from a programmability perspective; however, its performance still does not match that of OpenCL or OpenMP.
    Contents: Chapter 1, Introduction (1.1 Motivation; 1.2 Project Goals).
  • PGI Accelerator with OpenACC Getting Started Guide
    OpenACC Getting Started Guide, Version 2020. Table of contents:
    Chapter 1. Overview: 1.1 Terms and Definitions; 1.2 System Prerequisites; 1.3 Prepare Your System; 1.4 Supporting Documentation and Examples.
    Chapter 2. Using OpenACC with the PGI Compilers: 2.1 OpenACC Directive Summary; 2.2 CUDA Toolkit Versions; 2.3 Compute Capability; 2.4 C Structs in OpenACC; 2.5 C++ Classes in OpenACC; 2.6 Fortran Derived Types in OpenACC; 2.7 Fortran I/O (2.7.1 OpenACC PRINT Example).
  • Concurrent Parallel Processing on Graphics and Multicore Processors with OpenACC and OpenMP
    Concurrent parallel processing on graphics and multicore processors with OpenACC and OpenMP. Christopher P. Stone, DoD HPCMP PETTT Program; Prof. Roger L. Davis and Daryl Lee, U. of California Davis. Distribution Statement A: approved for public release, distribution unlimited.
    Acknowledgments: This material is based upon work supported by, or in part by, the Department of Defense High Performance Computing Modernization Program (HPCMP) under User Productivity, Technology Transfer and Training (PETTT) contract number GS04T09DBC0017.
    Background: MBFLO3 is a general-purpose, three-dimensional, multi-block structured-grid, finite-volume, multidisciplinary solution procedure. The primary application of MBFLO3 is modeling turbomachinery (e.g., compressors and turbines) for aerospace. It uses an explicit Lax-Wendroff control-volume, time-marching scheme; multi-block structured grids with both point-matched and overset interface capabilities; multigrid acceleration for steady flows and a point-implicit dual time-step for unsteady flows; and conjugate heat transfer modeling via coupled fluid-thermal solvers.
    Motivation: MBFLO3 originally used only MPI for parallelism across CPU cores, with one MPI rank per mesh block and blocks partitioned with the STAGE3 preprocessor / grid generator; arbitrary mesh-block subdivision is not available for point-matched, multi-block grids. Fixed mesh partitioning limits strong scaling, especially in the transition to many-core devices: on-chip (cache) memory per core has been stagnant for multicore devices, and memory per core on newer many-core devices is lower.
    Goals: redesign and refactor MBFLO3 to harness single instruction multiple data (SIMD) vectorization, i.e., improve fine-grain data parallelism. (A hedged sketch of the single-source OpenACC/OpenMP pattern follows below.)
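    One common way to get the single-source CPU/GPU behavior this entry describes is to guard the directives so that each build picks up its own pragma. This is a hedged sketch under that assumption (the loop and names are illustrative), not the MBFLO3 code itself.

        void add_flux(int n, const double *flux, double *q)
        {
        #ifdef _OPENACC
            /* accelerator build: offload the loop with OpenACC */
            #pragma acc parallel loop copyin(flux[0:n]) copy(q[0:n])
        #else
            /* CPU build: thread and vectorize the same loop with OpenMP */
            #pragma omp parallel for simd
        #endif
            for (int i = 0; i < n; i++)
                q[i] += flux[i];
        }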
  • Introduction to OpenMP & OpenACC
    INTRODUCTION TO OPENMP & OPENACC. Kadin Tseng, Boston University Scientific Computing and Visualization.
    Outline: introduction to OpenMP; introduction to OpenACC.
    Introduction to OpenMP (for CPUs). Types of parallel machines:
    • Distributed memory: each processor has its own memory address space, so variable values are independent (x = 2 on one processor, x = 3 on a different processor). Example: the nodes of a Linux cluster, like SCC's nodes.
    • Shared memory, also called Symmetric Multiprocessing (SMP): a single address space for all processors, so if one processor sets x = 2, x will also equal 2 on other processors (unless specified otherwise). Example: cores within each SCC node.
    (The original slides include diagrams of shared vs. distributed memory and of the Shared Computing Cluster (SCC): memory is shared among the cores within each node and distributed among nodes.)
    What is OpenMP? An Application Programming Interface (API) for multithreaded parallelization consisting of source code directives, functions and environment variables.
    • Advantages: easy to use; incremental parallelization; flexible (loop-level or coarse-grain); portable (works on any SMP machine, e.g., each individual SCC node).
    • Disadvantage: shared-memory systems only (i.e., not across SCC's nodes).
    (A minimal C sketch of the directive, function and environment-variable ingredients follows below.)
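    A minimal sketch of the three OpenMP ingredients the outline lists (a directive, a runtime library function, and an environment variable); the thread count is illustrative.

        #include <stdio.h>
        #include <omp.h>

        int main(void)
        {
            omp_set_num_threads(4);      /* runtime function; setting OMP_NUM_THREADS in
                                            the environment would have the same effect */
            #pragma omp parallel         /* directive: fork a team of threads */
            {
                printf("hello from thread %d of %d\n",
                       omp_get_thread_num(), omp_get_num_threads());
            }
            return 0;
        }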
  • Introduction to OpenACC
    Introduction to OpenACC. Oscar Hernandez, Richard Graham; Computer Science and Mathematics (CSM), Application Performance Tools Group. Managed by UT-Battelle for the U.S. Department of Energy.
    What is OpenACC? An accelerator programming API standard to program accelerators:
    • portable across operating systems and various types of host CPUs and accelerators;
    • allows parallel programmers to provide simple hints, known as "directives," to the compiler, identifying which areas of code to accelerate, without requiring programmers to modify or adapt the underlying code itself;
    • aimed at incremental development of accelerator code;
    • an effort driven by vendors with input from users and applications.
    OpenACC vendor support: the current vendors supporting OpenACC are Cray (high-level GPU directives), PGI (PGI accelerator directives), CAPS Enterprise (HMPP), and NVIDIA (CUDA, OpenCL), with others expected as this de facto standard gains traction. There is strong interaction with the OpenMP accelerator subcommittee, with input from other institutions including Texas Instruments, Intel, AMD, IBM, Oracle, ORNL, LLNL, Compunity, UH, BSC, EPCC, TACC, and TU-Dresden.
    Impact of OpenACC. Phase 1: the first standardization of high-level GPU directives (short-term, mid-term), heavily influenced by NVIDIA hardware. Phase 2: experience from OpenACC will drive the OpenMP for Accelerators effort, a more general solution that might take years to develop and will interoperate better with OpenMP.
    Overview of the OpenACC directives: directives facilitate code development for accelerators and provide the functionality to initiate accelerator startup/shutdown, manage data or program transfers between the host (CPU) and the accelerator, scope data between accelerator and host, and manage the work between the accelerator and the host. (A hedged C sketch of these data and work directives follows below.)
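    A hedged sketch of the data-management and work-sharing roles listed above (the array names, sizes and update are illustrative): a structured data region transfers the arrays once and keeps them resident on the accelerator across two offloaded loops.

        void step(int n, float dt, float *u, const float *f)
        {
            #pragma acc data copy(u[0:n]) copyin(f[0:n])
            {
                /* work runs on the accelerator; u and f stay on the device between loops */
                #pragma acc parallel loop
                for (int i = 0; i < n; i++)
                    u[i] += dt * f[i];

                #pragma acc parallel loop
                for (int i = 0; i < n; i++)
                    u[i] *= 0.5f;
            }
        }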