Accelerating HPC Applications on NVIDIA GPUs


ACCELERATING HPC APPLICATIONS ON NVIDIA GPUS WITH OPENACC
Doug Miles, PGI Compilers & Tools, NVIDIA
High Performance Computing Advisory Council, February 21, 2018

PGI — THE NVIDIA HPC SDK
Fortran, C & C++ Compilers: Optimizing, SIMD Vectorizing, OpenMP
Accelerated Computing Features: CUDA Fortran, OpenACC Directives
Multi-Platform Solution: x86-64 and OpenPOWER Multicore CPUs, NVIDIA Tesla GPUs; supported on Linux, macOS, Windows
MPI/OpenMP/OpenACC Tools: Debugger, Performance Profiler, interoperable with DDT, TotalView

Programming GPU-Accelerated Systems
Separate CPU System and GPU Memories (GPU Developer View): System Memory and GPU Memory are connected by PCIe, or by NVLink on supported systems.

CUDA FORTRAN

CPU Code:

  real, device, allocatable, dimension(:,:) :: Adev, Bdev, Cdev
  ...
  allocate (Adev(N,M), Bdev(M,L), Cdev(N,L))
  Adev = A(1:N,1:M)
  Bdev = B(1:M,1:L)
  call mm_kernel <<<dim3(N/16,M/16),dim3(16,16)>>> ( Adev, Bdev, Cdev, N, M, L )
  C(1:N,1:L) = Cdev
  deallocate ( Adev, Bdev, Cdev )
  ...

Tesla Code:

  attributes(global) subroutine mm_kernel ( A, B, C, N, M, L )
    real :: A(N,M), B(M,L), C(N,L), Cij
    integer, value :: N, M, L
    integer :: i, j, kb, k, tx, ty
    real, shared :: Asub(16,16), Bsub(16,16)
    tx = threadidx%x
    ty = threadidx%y
    i = blockidx%x * 16 + tx
    j = blockidx%y * 16 + ty
    Cij = 0.0
    do kb = 1, M, 16
      Asub(tx,ty) = A(i,kb+tx-1)
      Bsub(tx,ty) = B(kb+ty-1,j)
      call syncthreads()
      do k = 1, 16
        Cij = Cij + Asub(tx,k) * Bsub(k,ty)
      enddo
      call syncthreads()
    enddo
    C(i,j) = Cij
  end subroutine mm_kernel

CUDA FORTRAN !$CUF KERNEL Directives

  module madd_device_module
    use cudafor
  contains
    subroutine madd_dev(a,b,c,sum,n1,n2)
      real, dimension(:,:), device :: a,b,c
      real :: sum
      integer :: n1,n2
      type(dim3) :: grid, block
      !$cuf kernel do (2) <<<(*,*),(32,4)>>>
      do j = 1,n2
        do i = 1,n1
          a(i,j) = b(i,j) + c(i,j)
          sum = sum + a(i,j)
        enddo
      enddo
    end subroutine
  end module

Equivalent hand-written CUDA kernels:

  module madd_device_module
    use cudafor
    implicit none
  contains
    attributes(global) subroutine madd_kernel(a,b,c,blocksum,n1,n2)
      real, dimension(:,:) :: a,b,c
      real, dimension(:) :: blocksum
      integer, value :: n1,n2
      integer :: i,j,tindex,tneighbor,bindex
      real :: mysum
      real, shared :: bsum(256)
      ! Do this thread's work
      mysum = 0.0
      do j = threadidx%y + (blockidx%y-1)*blockdim%y, n2, blockdim%y*griddim%y
        do i = threadidx%x + (blockidx%x-1)*blockdim%x, n1, blockdim%x*griddim%x
          a(i,j) = b(i,j) + c(i,j)
          mysum = mysum + a(i,j)   ! accumulates partial sum per thread
        enddo
      enddo
      ! Now add up all partial sums for the whole thread block
      ! Compute this thread's linear index in the thread block
      ! We assume 256 threads in the thread block
      tindex = threadidx%x + (threadidx%y-1)*blockdim%x
      ! Store this thread's partial sum in the shared memory block
      bsum(tindex) = mysum
      call syncthreads()
      ! Accumulate all the partial sums for this thread block to a single value
      tneighbor = 128
      do while( tneighbor >= 1 )
        if( tindex <= tneighbor ) &
          bsum(tindex) = bsum(tindex) + bsum(tindex+tneighbor)
        tneighbor = tneighbor / 2
        call syncthreads()
      enddo
      ! Store the partial sum for the thread block
      bindex = blockidx%x + (blockidx%y-1)*griddim%x
      if( tindex == 1 ) blocksum(bindex) = bsum(1)
    end subroutine

    ! Add up partial sums for all thread blocks to a single cumulative sum
    attributes(global) subroutine madd_sum_kernel(blocksum,dsum,nb)
      real, dimension(:) :: blocksum
      real :: dsum
      integer, value :: nb
      real, shared :: bsum(256)
      integer :: tindex,tneighbor,i
      ! Again, we assume 256 threads in the thread block
      ! Accumulate a partial sum for each thread
      tindex = threadidx%x
      bsum(tindex) = 0.0
      do i = tindex, nb, blockdim%x
        bsum(tindex) = bsum(tindex) + blocksum(i)
      enddo
      call syncthreads()
      ! This code is copied from the previous kernel
      ! Accumulate all the partial sums for this thread block to a single value
      ! Since there is only one thread block, this single value is the final result
      tneighbor = 128
      do while( tneighbor >= 1 )
        if( tindex <= tneighbor ) &
          bsum(tindex) = bsum(tindex) + bsum(tindex+tneighbor)
        tneighbor = tneighbor / 2
        call syncthreads()
      enddo
      if( tindex == 1 ) dsum = bsum(1)
    end subroutine

    subroutine madd_dev(a,b,c,dsum,n1,n2)
      real, dimension(:,:), device :: a,b,c
      real, device :: dsum
      real, dimension(:), allocatable, device :: blocksum
      integer :: n1,n2,nb
      type(dim3) :: grid, block
      integer :: r
      ! Compute grid/block size; block size must be 256 threads
      grid = dim3((n1+31)/32, (n2+7)/8, 1)
      block = dim3(32,8,1)
      nb = grid%x * grid%y
      allocate(blocksum(1:nb))
      call madd_kernel<<< grid, block >>>(a,b,c,blocksum,n1,n2)
      call madd_sum_kernel<<< 1, 256 >>>(blocksum,dsum,nb)
      r = cudaThreadSynchronize()   ! don't deallocate too early
      deallocate(blocksum)
    end subroutine
  end module
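For context, here is a minimal host-side driver sketch (not part of the original slides) showing how the !$CUF kernel version of madd_dev above might be called. The program name, array names, and sizes are hypothetical; note that the hand-written variant instead expects its result argument (dsum) to be a device-resident scalar, so it would need a slightly different call sequence.

  ! Hypothetical driver; assumes the !$CUF kernel interface shown above.
  program test_madd
    use cudafor
    use madd_device_module
    implicit none
    integer, parameter :: n1 = 1024, n2 = 512
    real, dimension(:,:), allocatable         :: b_h, c_h
    real, dimension(:,:), allocatable, device :: a_d, b_d, c_d
    real :: sum

    allocate(b_h(n1,n2), c_h(n1,n2))
    call random_number(b_h)
    call random_number(c_h)
    allocate(a_d(n1,n2), b_d(n1,n2), c_d(n1,n2))
    b_d = b_h                 ! host-to-device copies via array assignment
    c_d = c_h
    sum = 0.0
    call madd_dev(a_d, b_d, c_d, sum, n1, n2)   ! reduction result returned in sum
    print *, 'sum of a =', sum
    deallocate(a_d, b_d, c_d, b_h, c_h)
  end program test_madd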
OpenACC Directives
Incremental, single source, interoperable, performance portable across CPU, GPU, and manycore targets.

  #pragma acc data copyin(a,b) copyout(c)   /* Manage data movement */
  {
    ...
    #pragma acc parallel                    /* Initiate parallel execution */
    {
      #pragma acc loop gang vector          /* Optimize loop mappings */
      for (i = 0; i < n; ++i) {
        c[i] = a[i] + b[i];
        ...
      }
    }
    ...
  }

OpenACC for GPUs in a Nutshell

  ...
  #pragma acc data copy(b[0:n][0:m]) \
                   create(a[0:n][0:m])
  {
    for (iter = 1; iter <= p; ++iter){
      #pragma acc parallel loop
      for (i = 1; i < n-1; ++i){
        for (j = 1; j < m-1; ++j){
          a[i][j] = w0*b[i][j] +
                    w1*(b[i-1][j]+b[i+1][j]+b[i][j-1]+b[i][j+1]) +
                    w2*(b[i-1][j-1]+b[i-1][j+1]+b[i+1][j-1]+b[i+1][j+1]);
        }
      }
      #pragma acc parallel loop
      for( i = 1; i < n-1; ++i )
        for( j = 1; j < m-1; ++j )
          b[i][j] = a[i][j];
    }
  }
  ...

(Figure: arrays a and b live in GPU Memory for the duration of the data region; only b is transferred between System Memory and GPU Memory, at region entry and exit.)

OpenACC is for Multicore, Manycore & GPUs

   98 !$acc parallel
   99 !$acc loop independent
  100 do k=y_min-depth,y_max+depth
  101 !$acc loop independent
  102   do j=1,depth
  103     density0(x_min-j,k)=left_density0(left_xmax+1-j,k)
  104   enddo
  105 enddo
  106 !$acc end parallel

Multicore CPU:

  % pgfortran -ta=multicore -fast -Minfo=acc -c update_tile_halo_kernel.f90
  100, Loop is parallelizable
       Generating Multicore code
       100, !$acc loop gang
  102, Loop is parallelizable

Tesla GPU:

  % pgfortran -ta=tesla -fast -Minfo=acc -c update_tile_halo_kernel.f90
  100, Loop is parallelizable
  102, Loop is parallelizable
       Accelerator kernel generated
       Generating Tesla code
       100, !$acc loop gang, vector(4)  ! blockidx%y threadidx%y
       102, !$acc loop gang, vector(32) ! blockidx%x threadidx%x

SPEC ACCEL 1.2 BENCHMARKS
(Chart: GEOMEAN seconds, lower is better. OpenACC results with PGI 18.1 on a 2-socket Broadwell system and on 1x Volta V100, where the V100 shows a 4.4x speed-up. OpenMP 4.5 results with Intel 2018 and PGI 18.1 on 2-socket Skylake (40 cores / 80 threads), 2-socket EPYC (48 cores / 48 threads), and 2-socket Broadwell (40 cores / 80 threads).)
Performance measured February, 2018. Skylake: Two 20 core Intel Xeon Gold 6148 CPUs @ 2.4GHz w/ 376GB memory, hyperthreading enabled. EPYC: Two 24 core AMD EPYC 7451 CPUs @ 2.3GHz w/ 256GB memory. Broadwell: Two 20 core Intel Xeon E5-2698 v4 CPUs @ 3.6GHz w/ 256GB memory, hyperthreading enabled. Volta: NVIDIA DGX1 system with two 20 core Intel Xeon E5-2698 v4 CPUs @ 2.20GHz, 256GB memory, one NVIDIA Tesla V100-SXM2-16GB GPU @ 1.53GHz. SPEC® is a registered trademark of the Standard Performance Evaluation Corporation (www.spec.org).
OPENACC APPLICATIONS

Parallelization Strategy
Within Gaussian 16, GPUs are used for a small fraction of code that consumes a large fraction of the execution time. The implementation of GPU parallelism conforms to Gaussian's general parallelization strategy. Its main tenets are to avoid changing the underlying source code and to avoid modifications which negatively affect CPU performance. For these reasons, OpenACC was used for GPU parallelization.

The Gaussian approach to parallelization relies on environment-specific parallelization frameworks and tools: OpenMP for shared memory, Linda for cluster and network parallelization across discrete nodes, and OpenACC for GPUs.

The process of implementing GPU support involved many different aspects: identifying places where GPUs could be beneficial (these are a subset of areas which are parallelized for other execution contexts, because using GPUs requires fine-grained parallelism), and understanding and optimizing data movement/storage at a high level to maximize ...

PGI Accelerator Compilers with OpenACC
PGI compilers fully support the current OpenACC standard as well as important extensions to it. PGI is an important contributor to the ongoing development of OpenACC. OpenACC enables developers to implement GPU parallelism by adding compiler directives to their source code, often eliminating the need for rewriting or restructuring. For example, the following Fortran compiler directive identifies a loop which the compiler should parallelize:

  !$acc parallel loop

Other directives allocate GPU memory, copy data to/from GPUs, specify data to remain on the GPU, combine or split loops and other code sections, and generally provide hints for optimal work distribution management, and more.

The OpenACC project is very active, and the specifications and tools are changing fairly rapidly. This has been true throughout the lifetime of this project. Indeed, one of its major challenges has been using OpenACC in the midst of its development. The talented people at PGI were instrumental in addressing issues that arose in one of the very first uses of OpenACC for a large commercial software package.
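To make the directive types mentioned above concrete, here is a minimal Fortran sketch (illustrative only, not taken from Gaussian): a data region allocates arrays on the GPU and keeps them resident across two kernels, while parallel loop directives generate the GPU kernels. The routine and array names are hypothetical.

  ! Illustrative sketch; names and sizes are hypothetical.
  subroutine scale_and_add(x, y, tmp, alpha, n)
    implicit none
    integer, intent(in)  :: n
    real,    intent(in)  :: alpha, x(n)
    real,    intent(out) :: y(n)
    real                 :: tmp(n)
    integer :: i

    ! create: allocate tmp on the GPU without copying;
    ! copyin/copyout: move x in once, move y back once at region exit.
    !$acc data create(tmp) copyin(x) copyout(y)

    !$acc parallel loop          ! first GPU kernel
    do i = 1, n
       tmp(i) = alpha * x(i)
    end do

    !$acc parallel loop          ! second GPU kernel; tmp stays on the GPU
    do i = 1, n
       y(i) = tmp(i) + x(i)
    end do

    !$acc end data               ! y copied back to the host, GPU memory freed
  end subroutine scale_and_add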