diff --git a/.gitignore b/.gitignore
index d2850ba8..5b80bc45 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,7 +39,7 @@ fio
!SDAccel/aws_platform/xilinx_aws-vu9p-f1_1ddr-xpr-2pr_4_0/sw/lib/x86_64/libxilinxopencl.so
!SDAccel/aws_platform/xilinx_aws-vu9p-f1_4ddr-xpr-2pr_4_0/sw/lib/x86_64/libxilinxopencl.so
!SDAccel/aws_platform/xilinx_aws-vu9p-f1_4ddr-xpr-2pr-debug_4_0/sw/lib/x86_64/libxilinxopencl.so
-!SDAccel/aws_platform/xilinx_aws-vu9p-f1_dynamic_5_0/sw/lib/x86_64/libxilinxopencl.so
+!SDAccel/aws_platform/xilinx_aws-vu9p-f1_dynamic_5_0/sw/lib/x86_64/libxilinxopencl.so
!SDAccel/aws_platform/xilinx_aws-vu9p-f1-04261818_dynamic_5_0/sw/lib/x86_64/libxilinxopencl.so
nohup.out
@@ -106,6 +106,13 @@ vivado*.log
# Patches
patches/*
+# Temporary files
+.batch
+.temp
+
+.python-version
+
+# FireSim specific
awsver.txt
sdk/linux_kernel_drivers/xdma/.libxdma.o.d
diff --git a/.gitmodules b/.gitmodules
index 3cccf78d..3cad5231 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,12 +1,13 @@
-[submodule "SDAccel/examples/xilinx_2017.4"]
- path = SDAccel/examples/xilinx_2017.4
+[submodule "SDAccel/examples/xilinx_2019.1"]
+ path = SDAccel/examples/xilinx_2019.1
url = https://github.com/Xilinx/SDAccel_Examples.git
- branch = aws_2017.4
-[submodule "SDAccel/examples/xilinx_2018.2"]
- path = SDAccel/examples/xilinx_2018.2
- url = https://github.com/Xilinx/SDAccel_Examples.git
- branch = 2018.2
-[submodule "SDAccel/examples/xilinx_2018.3"]
- path = SDAccel/examples/xilinx_2018.3
- url = https://github.com/Xilinx/SDAccel_Examples.git
- branch = master
+[submodule "Vitis/examples/xilinx_2019.2"]
+ path = Vitis/examples/xilinx_2019.2
+ branch = master
+ url = https://github.com/Xilinx/Vitis_Accel_Examples
+[submodule "Vitis/examples/xilinx_2020.1"]
+ path = Vitis/examples/xilinx_2020.1
+ url = https://github.com/Xilinx/Vitis_Accel_Examples
+[submodule "Vitis/examples/xilinx_2020.2"]
+ path = Vitis/examples/xilinx_2020.2
+ url = https://github.com/Xilinx/Vitis_Accel_Examples
diff --git a/ERRATA.md b/ERRATA.md
index a56958a4..5fbf6d82 100644
--- a/ERRATA.md
+++ b/ERRATA.md
@@ -5,14 +5,35 @@
[Shell\_04261818_Errata](./hdk/docs/AWS_Shell_ERRATA.md)
## HDK
-* Multiple SDE instances per CL is not supported in this release. Support planned for future release.
+* Multiple SDE instances per CL are not supported in this release. Support is planned for a future release.
* DRAM Data retention is not supported for CL designs with less than 4 DDRs enabled
-* Combinatorial loops in CL designs are not supported.
-* Shell Model (sh_bfm) provided with testbench for design simulations, continues to drive read data on PCIM AXI rdata channel even when rready is de-asserted. Will be fixed in future release.
+* Combinatorial loops in CL designs are not supported.
+* Connecting one of the clocks provided from the shell (clk_main_a0, clk_extra_a1, etc...) directly to a BUFG in the CL is not supported by the Xilinx tools and may result in a non-functional clock. To work around this limitation, it is recommended to use an MMCM to feed the BUFG (clk_from_shell -> MMCM -> BUFG). Please refer to [Xilinx AR# 73360](https://www.xilinx.com/support/answers/73360.html) for further details.
+
+### Xilinx Design Advisory for UltraScale/UltraScale+ DDR4/DDR3 IP - Memory IP Timing Exceptions (AR# 73068)
+AWS EC2 F1 customers using the DDR4 IP in customer logic (HDK or SDAccel/Vitis designs) may be impacted by a recent design advisory from Xilinx.
+
+AWS customers may experience hardware failures including: post calibration data errors and DQS gate tracking issues. The error condition is build dependent and errors would need to be detected on the first write/read access after a successful calibration to prevent further data corruption.
+
+To detect if your build is impacted by this bug, AWS recommends all EC2 F1 customers utilizing the DDR4 IP in their designs should run a TCL script on the design checkpoint (DCP) to determine if the design is susceptible to this issue. If the check passes, your design is safe to use as the hardware will function properly.
+If the check fails, the design is susceptible to the issue and will need to be regenerated using the same tool version with the AR 73068 patch.
+For designs under development, we recommend applying the patch to your on-premises tools or updating to developer kit v1.4.15.
+For additional details, please refer to the [Xilinx Answer Record #73068](https://www.xilinx.com/support/answers/73068.html)
+
+We recommend using [Developer Kit Release v1.4.15a](https://github.com/aws/aws-fpga/releases/tag/v1.4.15a) or newer to allow for patching and fixing the DDR4 IP timing exception by re-generating the IP.
+
+### 2019.1
+* Vivado `compile_simlib` command fails to generate the following verilog IP libraries for the following simulators.
+* Please refer to the Xilinx Answer record for details.
+
+| Library(verilog) | Simulator | Xilinx Answer Record |
+|---|---|---|
+| `sync_ip` | Cadence IES | [AR72795](https://www.xilinx.com/support/answers/72795.html) |
+| `hdmi_gt_controller_v1_0_0` | Synopsys VCS | [AR72601](https://www.xilinx.com/support/answers/72601.html) |
## SDK
## SDAccel (For additional restrictions see [SDAccel ERRATA](./SDAccel/ERRATA.md))
* Virtual Ethernet is not supported when using SDAccel
* DRAM Data retention is not supported for kernels that provision less than 4 DDRs
-* Combinatorial loops in CL designs are not supported.
+* Combinatorial loops in CL designs are not supported.
diff --git a/FAQs.md b/FAQs.md
index 4a12d4eb..52e5ce16 100644
--- a/FAQs.md
+++ b/FAQs.md
@@ -19,18 +19,17 @@
## General F1 FAQs
-**Q: How is developing a FPGA design for the cloud different from the common practice outside the cloud?**
+**Q: How is developing an FPGA design for the cloud different from the common practice outside the cloud?**
AWS designed its FPGA instances to provide a developer experience with ease of use and as similar as possible to on-premises development environment with the following differences (advantages):
-
- Developers don’t need to purchase / design / bringup or debug the physical hardware where the FPGA is hosted, nor the platform/server hardware: all the hardware is verified, monitored, and maintained by AWS.
-- AWS provides an [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) that contains Xilinx Vivado development environment, with all the needed licenses. By using the FPGA developer AMI developers have a choice to a wide range of instance (different CPU and Memory configuration) allowing developers to optimize their development flow.
+- AWS provides an [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) that contains Xilinx Vivado development environment, with all the needed licenses. By using the FPGA Developer AMI developers have a choice of a wide range of instances (different CPU and memory configurations), allowing developers to optimize their development flow.
- AWS provides cloud based debug tools: [Virtual JTAG](./hdk/docs/Virtual_JTAG_XVC.md) which is equivalent to debug using JTAG with on-premises development, and Virtual LED together with Virtual DIP Switch emulation the LED and DIP switches in typical development board.
-- For developers who want to develop on-premises, Xilinx provides an [on-premises license](./hdk/docs/on_premise_licensing_help.md ) that matches all the needed components needed to be licensed for F1 development on premises.
+- For developers who want to develop on-premises, Xilinx provides an [on-premises license](docs/on_premise_licensing_help.md ) that matches all the needed components needed to be licensed for F1 development on premises.
- The developers' output is a Design Checkpoint (DCP) and not an FPGA bitstream: The FPGA bitstream is actually generated by AWS after the developer submits the DCP.
@@ -46,30 +45,32 @@ There are two parts to answer this question:
For developers that are familiar with AWS, there is almost no additional time to get right into F1 development environment, as long as the documentation and guidances in the [FPGA HDK/SDK](https://github.com/aws/aws-fpga) are followed.
-For developers who are new to AWS, there is typically a one to two days ramp on AWS general topics such as launching EC2 instance, setting up S3 storage and its permissions, using AWS console, etc… For new developers to AWS, we recommend to start with the [FPGA Developer Forum](https://forums.aws.amazon.com/ann.jspa?annID=4448)
+For developers who are new to AWS, there is typically a one-to-two-day ramp-up on AWS general topics such as launching EC2 instances, setting up S3 storage and its permissions, using the AWS console, etc… For developers new to AWS, we recommend starting with the [FPGA Developer Forum](https://forums.aws.amazon.com/ann.jspa?annID=4448)
- On-going development flow:
-Once developers complete their DCP, they submit the design through an AWS EC2 API to create the Amazon FGPA Image (aka AFI, this API call can take a few hours to complete, and the status of the process is reported in the S3 log bucket provides by the developers. AWS is working to improve the turn time of AFI generation.
+Once developers create their DCP, they submit the design through an AWS EC2 API to create the Amazon FPGA Image (aka AFI). This API call can take a few hours to complete, and the status of the process is reported in the S3 log bucket provided by the developers. AWS is working to improve the turn time of AFI generation.
**Q: What new skill sets will be required from an FPGA developer in the cloud?**
-As AWS has taken all the non-differentiating, heavy lifting of hardware design, debug and implementation of PCIe tuning, FPGA I/O assignment, power, thermal management, and runtime health monitoring. Therefore AWS FPGA developers can focus on their own differentiating logic, instead of spending time on hardware bringup/debug and maintenance.
+AWS takes care of all the non-differentiating, heavy lifting of hardware design, debug and implementation of PCIe tuning, FPGA I/O assignment, power, thermal management, and runtime health monitoring.
+
+This enables AWS FPGA developers to focus on their own differentiating logic, instead of spending time on hardware bring-up/debug and maintenance.
-On the business side, AWS Marketplace (MP) provides FPGA developers the opportunity to sell hardware accelerations to all of AWS users: Ramping on AWS MP services, capabilities and commercial opportunities are recommended knowledge for developers interested in selling their AFIs on AWS MP. Education and research institutes can use AWS MP to distribute their research work ; having access to vast amounts of free [public data-sets](https://aws.amazon.com/public-datasets/ ) can be of value when running research hardware accelerations on AWS.
+On the business side, AWS Marketplace (MP) provides FPGA developers the opportunity to sell hardware accelerations to all of AWS users: Ramping on AWS MP services, capabilities and commercial opportunities are recommended knowledge for developers interested in selling their AFIs on AWS MP. Education and research institutes can use AWS MP to distribute their research work. Having access to vast amounts of free [public data-sets](https://aws.amazon.com/public-datasets/ ) can be of value when running research hardware accelerations on AWS.
Finally, AWS consulting and technology partners can offer their services through the [AWS Partner Network](https://aws.amazon.com/ec2/instance-types/f1/partners/) to AWS users that don’t have specific FPGA development knowledge, in order to develop FPGA accelerations in the cloud by themselves.
-**Q: How is deployment FPGA in the cloud different compared to on-premises?**
+**Q: How is developing on FPGAs in the cloud different from on-premises?**
With AWS, FPGAs developers have a few advantages:
- Low entry bar: AWS FPGAs are charged on an hourly rate instead of the many thousands of dollars spent on hardware/licenses and 12+ months time it takes to design/manufacture and ship a production-ready FPGA hardware solution.
-- Scalability and Elasticity: Developers can ramp up / down the number of deployed FPGAs within seconds based on offered load.
+- Scalability and Elasticity: Developers can ramp up / down the number of deployed FPGAs within seconds based on required load.
- Share: FPGA developers can share their designs easily through AWS Marketplace or APN. This is important for businesses as well as education and research use.
@@ -80,11 +81,11 @@ With AWS, FPGAs developers have a few advantages:
The HDK includes the following main components:
-1) Documentation for the Shell interface and other Custom Logic implementation guidelines, the Shell models needed for Custom Logic development, simulation models for the Shell, software for exercising.
+1) Documentation for the Shell interface and other Custom Logic implementation guidelines, shell models needed for Custom Logic development, simulation models for the shell, scripts for building and simulating, etc.
2) Custom Logic examples, a getting started guide for building your own Custom Logic, and examples for starting a Custom Logic Design.
-3) Scripts for building and submitting Amazon FPGA Image (AFI) from a Custom Logic.
+3) Scripts for building and creating Amazon FPGA Images (AFI) from a Custom Logic.
4) Reference software drivers to be used in conjunction with the Custom Logic examples.
@@ -93,7 +94,7 @@ The HDK includes the following main components:
**Q: What is in the AWS Shell?**
-The AWS Shell is the part of the FPGA that is provided and managed by AWS: it implements the non-differentiated development and heavy lifting tasks like setting up the PCIe interface, FPGA image download, security, health monitoring, metrics and debug hooks.
+The AWS Shell is the part of the FPGA that is provided and managed by AWS: it implements the non-differentiated development and heavy lifting tasks like setting up the PCIe interface, security, health monitoring, metrics and debug hooks.
Every FPGA deployed in AWS cloud includes an AWS Shell, and the developer Custom Logic (CL) interfaces with the available AWS Shell interfaces.
@@ -102,7 +103,15 @@ Every FPGA deployed in AWS cloud includes an AWS Shell, and the developer Custom
It is the compiled FPGA code that is loaded into an FPGA in AWS for performing the Custom Logic (CL) function created by the developer. AFIs are maintained by AWS according and associated with the AWS account that created them. The AFI includes the CL and AWS FPGA Shell. An AFI ID is used to reference a particular AFI from an F1 instance.
-The developer can create multiple AFIs at no extra cost, up to a defined limited (typically 100 AFIs per region per AWS account). An AFI can be loaded into as many FPGAs as needed.
+The developer can create multiple AFIs at no extra cost, up to a defined limit (typically 500 AFIs per region per AWS account). An AFI can be loaded into as many FPGAs as needed.
+
+**Q: How do I increase my AFI limit?**
+
+You can increase your AFI limit by creating an [AWS Support Case](https://console.aws.amazon.com/support/home#/case/create).
+1. Select the `Service Limit Increase` tab
+2. In the `Limit Type`, select `EC2 FPGA`
+3. Select the region(s) where you want your limit to be increased
+4. Add justification for the limit increase.
**Q: What regions are supported?**
@@ -110,12 +119,12 @@ The developer can create multiple AFIs at no extra cost, up to a defined limited
AWS FPGA generation and EC2 F1 instances are supported in us-east-1 (N. Virginia), us-west-2 (Oregon), eu-west-1 (Ireland) and us-gov-west-1 (GovCloud US).
-
**Q: What is the process for creating an AFI?**
-The AFI process starts by creating Custom Logic (CL) code that conforms to the [Shell Specification](./hdk/docs/AWS_Shell_Interface_Specification.md). Then, the CL must be compiled using the HDK scripts which leverages Vivado tools to create a Design Checkpoint (DCP). That DCP is submitted to AWS for generating an AFI using the `aws ec2 create-fpga-image` API.
-
-Use the AWS CLI `describe-fpga-images` API to get information about the created AFIs using the AFI ID provided by `create-fpga-image`, or to list available AFIs for your account. See [describe-fpga-images](./hdk/docs/describe_fpga_images.md) document for details on how to use this API.
+* The AFI process starts by creating Custom Logic (CL) code that conforms to the [Shell Specification](./hdk/docs/AWS_Shell_Interface_Specification.md).
+* Then, the CL must be compiled using the HDK scripts which leverages Vivado tools to create a Design Checkpoint (DCP).
+* That DCP is submitted to AWS for generating an AFI using the `aws ec2 create-fpga-image` API.
+ * Use the AWS CLI `describe-fpga-images` API to get information about the created AFIs using the AFI ID provided by `create-fpga-image`, or to list available AFIs for your account. See [describe-fpga-images](./hdk/docs/describe_fpga_images.md) document for details on how to use this API.
**Q: Can I load an AFI on every region AWS FPGA is supported?**
@@ -138,19 +147,13 @@ Yes, use [delete-fpga-image](./hdk/docs/delete_fpga_image.md) to delete an AFI i
Use [delete-fpga-image](./hdk/docs/delete_fpga_image.md) carefully. Once all AFIs of the same global AFI ID are deleted, the AFIs cannot be recovered from deletion. Review [IAM policy best practices](http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege) to restrict access to this API.
-**Q: Can I share an AFI with other AWS accounts?**
-
-Yes, sharing allows accounts other than the owner account to load and use an AFI. Use [modify-fpga-image-attribute](./hdk/docs/fpga_image_attributes.md) API to update `loadPermission` attribute to grant/remove AFI load permission. AWS AFIs support two load permission types:
-* `UserId`: share AFI with specific AWS accounts using account IDs.
-* `UserGroups`: only supports `all` group to make an AFI public or private.
-
-Use [reset-fpga-image-attribute](./hdk/docs/fpga_image_attributes.md) API to revoke all load permissions.
+**Q: How do I increase my AFI limit?**
-**Q: Can I delete an AFI?**
+AFI limit increases may be requested by opening up a Support Case from your [EC2 Support Console](https://console.aws.amazon.com/support/cases#/create)
-Yes, use [delete-fpga-image](./hdk/docs/delete_fpga_image.md) to delete an AFI in a specific region. Deleting an AFI in one region does not affect AFIs in other regions.
+Select a `Service limit increase` of the Limit Type - `EC2 FPGA` for the region where a limit increase is needed.
-Use [delete-fpga-image](./hdk/docs/delete_fpga_image.md) carefully. Once all AFIs of the same global AFI ID are deleted, the AFIs cannot be recovered from deletion. Review [IAM policy best practices](http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege) to resrict access to this API.
+You will hear back from our support team once the limit is increased.
**Q: Can I bring my own bitstream for loading on an F1 FPGA?**
@@ -175,7 +178,7 @@ AWS prefers not to limit developers to a specific template in terms of how we ad
If you decide to use the [FPGA Developer AMI on AWS Marketplace](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ), Xilinx licenses for simulation, encryption, SDAccel and Design Checkpoint generation are included at no additional cost.
-If you want to run using other methods or on a local machine, you will need to obtain any necessary licenses, specifically you will need to have setup the appropriate Xilinx Vivado license. For more details, please refer to [On-premises licensing help](./hdk/docs/on_premise_licensing_help.md)
+If you want to run using other methods or on a local machine, you will need to obtain any necessary licenses, specifically you will need to have setup the appropriate Xilinx Vivado license. For more details, please refer to [On-premises licensing help](docs/on_premise_licensing_help.md)
**Q: Does AWS provide physical FPGA boards for on-premises development?**
@@ -185,20 +188,25 @@ No. AWS supports a cloud-only development model and provides the necessary eleme
**Q: Do I need to design for a specific power envelope?**
-Yes, the Xilinx UltraScale+ FPGA devices used on the F1 instances have a maximum power limit that must be maintained. If a loaded AFI consumes maximum power, the F1 instance will automatically gate the input clocks provided to the AFI in order to prevent errors within the FPGA. Developers are provided warnings when power (Vccint) is greater than 85 watts. Above that level, the CL is in danger of being clock gated. [Additional details on AFI power](hdk/docs/afi_power.md)
-
+Yes, the Xilinx UltraScale+ FPGA devices used on the F1 instances have a maximum power limit that must be maintained.
+If a loaded AFI consumes maximum power, the F1 instance will automatically gate the input clocks provided to the AFI in order to prevent errors within the FPGA.
+Developers are provided warnings when power (Vccint) is greater than 85 watts. Above that level, the CL is in danger of being clock gated.
-**Q: What IP blocks are provided in the HDK?**
+[Additional details on AFI power](hdk/docs/afi_power.md)
-The HDK includes IP for AWS Shell and DRAM interface controllers. Inside the Shell, there is a PCIe interface, DMA Engine, and one DRAM interface controller. These blocks are only accessible via the AXI interfaces defined by the Shell-Custom Logic interface. The HDK provides additional IP blocks for the other DRAM interfaces, enabling up to 3 additional DRAM interfaces instantiated by the developer in the Custom Logic region.
- **Note** * future versions of the HDK will include IP for the FPGA Link interface.*
+**Q: What IP blocks are provided in the HDK?**
+The HDK includes IP for AWS Shell and DRAM interface controllers.
+Inside the Shell, there is a PCIe interface, DMA Engine, and one DRAM interface controller.
+These blocks are only accessible via the AXI interfaces defined by the Shell-Custom Logic interface.
+The HDK provides additional IP blocks for the other DRAM interfaces, enabling up to 3 additional DRAM interfaces instantiated by the developer in the Custom Logic region.
**Q: Can I use other IP blocks from Xilinx or other 3rd parties?**
-Yes. Developers are free to use any IP blocks within the Custom Logic region. Those can be 3rd party IPs or IP available in the Vivado IP catalog.
+Yes. Developers are free to use any IP blocks within the Custom Logic region.
+Those can be 3rd party IPs or IP available in the Vivado IP catalog.
**Note** * AWS supports only the IP blocks contained in the HDK.*
@@ -207,19 +215,23 @@ Yes. Developers are free to use any IP blocks within the Custom Logic region. Th
## Getting Started FAQs
**Q: What AWS knowledge do I need to learn before I can develop accelerators and run on AWS F1 instances?**
-[AWS Getting Started Resource Center](https://aws.amazon.com/getting-started/) has lots of resources to help developers get started. For F1 development, launching linux virtual machines (EC2) and storing and retrieving files from S3 are required skills.
+[AWS Getting Started Resource Center](https://aws.amazon.com/getting-started/) has lots of resources to help developers get started.
+For F1 development, launching EC2 instances and storing and retrieving files from S3 are required skills.
**Q: What do I need to get started on building accelerators for FPGA instances?**
-Getting started requires downloading the latest HDK and SDK from the AWS FPGA GitHub repository. The HDK and SDK provide the needed code and information for building FPGA code. The HDK provides all the information needed for developing an FPGA image from source code, while the SDK provides all the runtime software for managing the Amazon FPGA Image (AFI) loaded into the F1 instance FPGA.
+Getting started requires downloading the latest HDK and SDK from the [AWS FPGA GitHub repository](https://github.com/aws/aws-fpga).
+The HDK and SDK provide the needed code and information for building FPGA code. The HDK provides all the information needed for developing an FPGA image from source code, while the SDK provides all the runtime software for managing the Amazon FPGA Image (AFI) loaded into the F1 instance FPGA.
-Typically, FPGA development process requires a simulator to perform functional test on the source code, and a Vivado tool set for synthesis of source code into compiled FPGA code. The FPGA Developer AMI provided by AWS includes the complete Xilinx Vivado tools for simulation (XSIM) and synthesis of FPGA.
+Typically, FPGA development process requires a simulator to perform functional test on the source code, and a Vivado tool set for synthesis of source code into compiled FPGA code.
+The FPGA Developer AMI provided by AWS includes the complete Xilinx Vivado tools for simulation (XSIM) and synthesis of FPGA.
**Q: How do I develop accelerator code for an FPGA in an F1 instance?**
-Start with the [Shell interface specification](./hdk/docs/AWS_Shell_Interface_Specification.md). This document describes the interface between Custom Logic and the AWS Shell. All Custom Logic for an accelerator resides within the Custom Logic region of the F1 FPGA.
+Start with the [Shell interface specification](./hdk/docs/AWS_Shell_Interface_Specification.md).
+This document describes the interface between Custom Logic and the AWS Shell. All Custom Logic for an accelerator resides within the Custom Logic region of the F1 FPGA.
The [HDK README](./hdk/README.md) walks the developer through the steps to build an FPGA image from one of the provided examples as well as starting a new code.
@@ -265,12 +277,19 @@ We recommend using the latest available version to be able to use the expanding
## Marketplace FAQs
**Q: What does publishing my AFI/AMI to AWS Marketplace enables?**
-FPGA Developers can share or sell their AFI/AMI using the AWS Marketplace to other AWS users. Once in Marketplace, AWS users can launch an F1 instance with that AFI/AMI combination with the 1-click deployment feature. Marketplace Sellers can take advantage of the Management Portal to better build and analyze their business, using it to drive marketing activities and customer adoption. The metering, billing, collections, and disbursement of payments are managed by AWS, allowing developers to focus on marketing their solution. Please check out [AWS Marketplace Tour](https://aws.amazon.com/marketplace/management/tour/) for more details on how to become an AWS Marketplace seller, how to set pricing and collect metrics.
+FPGA Developers can share or sell their AFI/AMI using the AWS Marketplace to other AWS users.
+Once in Marketplace, AWS users can launch an F1 instance with that AFI/AMI combination with the 1-click deployment feature.
+Marketplace Sellers can take advantage of the Management Portal to better build and analyze their business, using it to drive marketing activities and customer adoption.
+The metering, billing, collections, and disbursement of payments are managed by AWS, allowing developers to focus on marketing their solution.
+
+Please check out [AWS Marketplace Tour](https://aws.amazon.com/marketplace/management/tour/) for more details on how to become an AWS Marketplace seller, how to set pricing and collect metrics.
**Q: How can I publish my AFI to AWS Marketplace?**
-First, you need to [register as a Marketplace Seller](https://aws.amazon.com/marketplace/management/register/). In parallel you should create an AMI that includes the drivers and runtime libraries needed to use your AFI. Finally, follow the [standard flow](https://aws.amazon.com/marketplace/help/200940360) to publish your AMI on AWS marketplace, providing the associated AFI IDs. In other words, AFIs are not published directly on AWS marketplace, rather AFI(s) should be associated with an AMI that gets published.
+* First, you need to [register as a Marketplace Seller](https://aws.amazon.com/marketplace/management/register/).
+* In parallel you should create an AMI that includes the drivers and runtime libraries needed to use your AFI.
+* Finally, follow the [standard flow](https://aws.amazon.com/marketplace/help/200940360) to publish your AMI on AWS marketplace, providing the associated AFI IDs. In other words, AFIs are not published directly on AWS marketplace, rather AFI(s) should be associated with an AMI that gets published.
**Q: Do AWS Marketplace customers see FPGA source code or a bitstream?**
@@ -281,7 +300,11 @@ Neither, no FPGA internal design code is exposed. AWS Marketplace customers that
## F1 Instance and Runtime Tools FAQs
**Q: What OS can run on the F1 instance?**
-CentOS 7.x is supported and tested on AWS EC2 F1 instance. Please see [release notes](./RELEASE_NOTES.md) for a description of compatible kernel & OS versions supported by a specific Developer kit version. Developers can utilize the source code in the SDK directory to compile other variants of Linux for use on F1. Windows OSs are not supported on F1.
+CentOS 7.x is supported and tested on AWS EC2 F1 instance.
+Please see [release notes](./RELEASE_NOTES.md) for a description of compatible Kernel & OS versions supported by a specific Developer kit version.
+Developers can utilize the source code in the SDK directory to compile other variants of Linux for use on F1.
+
+NOTE: Windows OSs are not supported on F1.
**Q: What are the interfaces between the F1 instance host CPU and the FPGAs?**
@@ -293,7 +316,6 @@ The first is the FPGA Image Management Tools. These APIs are detailed in the [SD
The second type of interface is direct address access to the Application PCIe Physical Functions (PF) of the FPGA. There is no API for this access. Rather, there is direct access to resources in the Custom Logic (CL) region or Shell that can be accessed by software written on the instance. For example, the ChipScope software (Virtual JTAG) uses address space in a PF to provide FPGA debug support. Developers can create any API to the resources in their CL. See the [Shell Interface Specification](./hdk/docs/AWS_Shell_Interface_Specification.md) for more details on the address space mapping as seen from the instance.
-
**Q: Can I integrate the FPGA Image Management Tools in my application?**
Yes, In addition to providing the [FPGA Management Tools](./sdk/userspace/fpga_mgmt_tools) as linux shell commands, the [SDK Userspace](./sdk/userspace) directory includes files in the `include` and `hal` to integrate the FPGA Management Tools into the developer's application(a) and avoid calling linux shell commands.
@@ -326,8 +348,8 @@ The AWS infrastructure scrubs FPGA state on termination of an F1 instance and an
**Q: How do the FPGAs connect to the x86 CPU?**
-Each FPGA in F1 is connected to the instance CPU via a x16 PCIe Gen3 interface. Physical Functions (PF) within the FPGA are directly mapped into the F1 instance. Software on the instance can directly access the address in the PF to take advantage of the high performance PCIe interface.
-
+Each FPGA in F1 is connected to the instance CPU via a x16 PCIe Gen3 interface.
+Physical Functions (PF) within the FPGA are directly mapped into the F1 instance. Software on the instance can directly access the address in the PF to take advantage of the high performance PCIe interface.
**Q: Can the FPGAs on F1 directly access Amazon’s network?**
@@ -346,8 +368,10 @@ No. The FPGAs do not have direct access to the SSDs on F1. The SSDs on F1 are hi
## Development Languages FAQs
**Q: Which HDL languages are supported?**
-For RTL level development: Verilog and VHDL are both supported in the FPGA Developer AMI and in generating a Design Checkpoint. The Xilinx Vivado tools and simulator support mixed mode simulation of Verilog and VHDL. The AWS Shell is written in Verilog. Support for mixed mode simulation may vary if developers use other simulators. Check your simulator documentation for Verilog/VHDL/System Verilog support.
-
+For RTL level development: Verilog and VHDL are both supported in the FPGA Developer AMI and in generating a Design Checkpoint.
+The Xilinx Vivado tools and simulator support mixed mode simulation of Verilog and VHDL.
+The AWS Shell is written in Verilog. Support for mixed mode simulation may vary if developers use other simulators.
+Check your simulator documentation for Verilog/VHDL/System Verilog support.
**Q: Is OpenCL and/or SDAccel Supported?**
@@ -355,41 +379,44 @@ For RTL level development: Verilog and VHDL are both supported in the FPGA Devel
Yes. Please review the [SDAccel README to get started](SDAccel/README.md)
-
**Q: Can I use High Level Synthesis(HLS) Tools to generate an AFI?**
-Yes. Vivado HLS and SDAccel are directly supported through the FPGA Developer AMI. Any other HLS tool that generates compatible Verilog or VHDL for Vivado input can also be used for writing in HLS.
-
+Yes. Vivado HLS and SDAccel are directly supported through the FPGA Developer AMI.
+Any other HLS tool that generates compatible Verilog or VHDL for Vivado input can also be used for writing in HLS.
**Q: What RTL simulators are supported?**
The FPGA Developer AMI has built-in support for the Xilinx XSIM simulator. All licensing and software for XSIM is included in the FPGA Developer AMI when launched.
-Support for other simulators is included through the bring-your-own license in the FPGA Developer AMI. AWS tests the HDK with Synopsys VCS, Mentor Questa/ModelSim, and Cadence Incisive. Licenses for these simulators must be acquired by the developer and are not available with AWS FPGA Developer AMI.
+AWS tests the HDK with Synopsys VCS, Mentor Questa/ModelSim, and Cadence Incisive. Licenses for these simulators must be acquired by the developer and are not available with AWS FPGA Developer AMI.
## FPGA Specific FAQs
**Q: What FPGA is used in AWS EC2 F1 instance?**
-The FPGA for F1 is the Xilinx Ultrascale+ VU9P device with the -2 speed grade. The HDK scripts have the compile scripts needed for the VU9P device.
+The FPGA for F1 is the Xilinx Ultrascale+ VU9P device with the -2 speed grade.
+The HDK scripts have the compile scripts needed for the VU9P device.
**Q: What is FPGA Direct and how fast is it?**
-FPGA Direct is FPGA to FPGA low latency high throughput peer communication through the PCIe links on each FPGA, where all FPGAs shared the same memory space. The PCIe BAR space in the Application PF (see [Shell Interface specification](./hdk/docs/AWS_Shell_Interface_Specification.md) for more details) allows the developer to map regions of the Custom Logic, such as external DRAM space, to other FPGAs. The implementation of communication protocol and data transfer engine across the PCIe interface using FPGA direct is left to the developer.
+FPGA Direct is FPGA to FPGA low latency high throughput peer communication through the PCIe links on each FPGA, where all FPGAs share the same memory space.
+The PCIe BAR space in the Application PF (see [Shell Interface specification](./hdk/docs/AWS_Shell_Interface_Specification.md) for more details) allows the developer to map regions of the Custom Logic, such as external DRAM space, to other FPGAs.
+The implementation of communication protocol and data transfer engine across the PCIe interface using FPGA direct is left to the developer.
**Q: What is FPGA Link and how fast is it?**
-FPGA Link is based on 4 x 100Gbps links on each FPGA card. The FPGA Link is organized as a ring, with 2 x 100Gbps links to each adjacent card. This enables each FPGA card to send/receive data from an adjacent card at 200Gbps speeds. This is a unsupported feature planned for future release. Details on the FPGA Link interface will be provided in the Shell Interface specification when available.
+FPGA Link is based on 4 x 100Gbps links on each FPGA card. The FPGA Link is organized as a ring, with 2 x 100Gbps links to each adjacent card. This enables each FPGA card to send/receive data from an adjacent card at 200Gbps speeds.
+This is an unsupported feature planned for future release. Details on the FPGA Link interface will be provided in the Shell Interface specification when available.
**Q: What protocol is used for FPGA link?**
The FPGA link is a generic raw streaming interface, no transport protocol is provided for it by AWS. It is expected that developers would take advantage of standard PCIe protocol, Ethernet protocol, or Xilinx's (reliable) Aurora protocol layer for this interface.
-This is a unsupported feature planned for future release. Details on the Shell Interface to the FPGA Link IP blocks are provided in the [Shell Interface specification](./hdk/docs/AWS_Shell_Interface_Specification.md) when available.
+This is an unsupported feature planned for future release. Details on the Shell Interface to the FPGA Link IP blocks are provided in the [Shell Interface specification](./hdk/docs/AWS_Shell_Interface_Specification.md) when available.
**Q: What clock speed does the FPGA utilize?**
@@ -458,13 +485,37 @@ Parent process (pid 8160) has died. This helper process will now exit
*For On Premise runs:*
-You would need a valid [on premise license](./hdk/docs/on_premise_licensing_help.md) provided by Xilinx.
+You would need a valid [on premise license](docs/on_premise_licensing_help.md) provided by Xilinx.
*For runs using the FPGA Developer AMI:* Please contact us through [AWS FPGA Developers forum](https://forums.aws.amazon.com/forum.jspa?forumID=243)
-
**Q: Why does Vivado in GUI mode show up blank ? or Why does Vivado in GUI mode show up as an empty window?**
We have seen this issue when running RDP in 32 bit color mode where Vivado shows up as a blank window.
Please modify RDP options to choose any color depth less than 32 bit and try re-connecting.
+
+**Q: Why did my AFI creation fail with `***ERROR***: DCP has DNA_PORT instantiation, ingestion failed, exiting`?**
+
+AWS does not support creating AFIs with the Device DNA instantiated within your design. Please create your design without instantiating the DNA_PORT primitive to be able to create your AFI.
+
+**Q: How do I know which HDK version I have on my instance/machine?**
+
+Look for the ./hdk/hdk_version.txt file.
+
+**Q: How do I know what my Shell version is?**
+
+The Shell version of an FPGA slot is available through the FPGA Image Management tools after an AFI has been loaded.
+See the description of `fpga-describe-local-image` for more details on retrieving the shell version from a slot.
+Prior to loading an AFI, the state of the FPGA (including shell version) is undefined and non-deterministic.
+
+**Q: How do I know what version of FPGA Image management tools are running on my instance?**
+
+The FPGA Image management tools version is reported with any command executed from these tools.
+See the description of `fpga-describe-local-image` for more details.
+
+**Q: How do I update my existing design with a new release?**
+
+1. Start by pulling changes from a new [aws-fpga github release](https://github.com/aws/aws-fpga)
+1. If the [AWS Shell Interface Specification](./hdk/docs/AWS_Shell_Interface_Specification.md) has changed, update your CL design to conform to the new specification.
+1. Follow the process for AFI generation
diff --git a/Jenkinsfile b/Jenkinsfile
index 05a558f3..4f980364 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -19,6 +19,8 @@ properties([parameters([
booleanParam(name: 'test_sdaccel_scripts', defaultValue: true, description: 'Test SDAccel setup scripts'),
booleanParam(name: 'test_all_sdaccel_examples_fdf', defaultValue: false, description: 'Run Full Developer Flow testing of all SDAccel examples. This overrides test_helloworld_sdaccel_example'),
booleanParam(name: 'test_helloworld_sdaccel_example_fdf', defaultValue: true, description: 'Run Full Developer Flow testing of the Hello World SDAccel example'),
+ booleanParam(name: 'test_all_vitis_examples_fdf', defaultValue: false, description: 'Run Full Developer Flow testing of all Vitis examples. This overrides test_helloworld_vitis_example'),
+ booleanParam(name: 'test_helloworld_vitis_example_fdf', defaultValue: true, description: 'Run Full Developer Flow testing of the Hello World Vitis example'),
booleanParam(name: 'debug_dcp_gen', defaultValue: false, description: 'Only run FDF on cl_hello_world. Overrides test_*.'),
booleanParam(name: 'debug_fdf_uram', defaultValue: false, description: 'Debug the FDF for cl_uram_example.'),
booleanParam(name: 'fdf_ddr_comb', defaultValue: false, description: 'run FDF for cl_dram_dma ddr combinations.'),
@@ -44,9 +46,11 @@ boolean test_hdk_fdf = params.get('test_hdk_fdf')
boolean test_sdaccel_scripts = params.get('test_sdaccel_scripts')
boolean test_all_sdaccel_examples_fdf = params.get('test_all_sdaccel_examples_fdf')
boolean test_helloworld_sdaccel_example_fdf = params.get('test_helloworld_sdaccel_example_fdf')
+boolean test_all_vitis_examples_fdf = params.get('test_all_vitis_examples_fdf')
+boolean test_helloworld_vitis_example_fdf = params.get('test_helloworld_vitis_example_fdf')
boolean disable_runtime_tests = params.get('disable_runtime_tests')
-def runtime_sw_cl_names = ['cl_dram_dma', 'cl_hello_world']
+def runtime_sw_cl_names = ['cl_dram_dma', 'cl_hello_world', 'cl_sde']
def dcp_recipe_cl_names = ['cl_dram_dma', 'cl_hello_world']
def dcp_recipe_scenarios = [
// Default values are tested in FDF: A0-B0-C0-DEFAULT
@@ -68,6 +72,7 @@ def fdf_test_names = [
'cl_dram_dma[A1-B0-C0-DEFAULT]',
'cl_hello_world[A0-B0-C0-DEFAULT]',
'cl_hello_world_vhdl',
+ 'cl_sde[A0-B0-C0-DEFAULT]',
'cl_uram_example[2]',
'cl_uram_example[3]',
'cl_uram_example[4]'
@@ -75,7 +80,7 @@ def fdf_test_names = [
boolean debug_dcp_gen = params.get('debug_dcp_gen')
if (debug_dcp_gen) {
- fdf_test_names = ['cl_hello_world[A0-B0-C0-DEFAULT]']
+ fdf_test_names = ['cl_sde[A0-B0-C0-DEFAULT]']
test_markdown_links = false
test_sims = false
test_runtime_software = false
@@ -121,61 +126,85 @@ task_label = [
]
// Put the latest version last
-def xilinx_versions = [ '2017.4', '2018.2', '2018.3' ]
+def xilinx_versions = [ '2019.1', '2019.2', '2020.1' , '2020.2' ]
+
+def vitis_versions = ['2019.2', '2020.1' , '2020.2' ]
// We want the default to be the latest.
def default_xilinx_version = xilinx_versions.last()
def dsa_map = [
- '2017.4' : [ 'DYNAMIC_5_0' : 'dyn'],
- '2018.2' : [ 'DYNAMIC_5_0' : 'dyn'],
- '2018.3' : [ 'DYNAMIC_5_0' : 'dyn']
+ '2019.1' : [ 'DYNAMIC_5_0' : 'dyn'],
+]
+
+def xsa_map = [
+ '2019.2' : [ 'DYNAMIC':'dyn'],
+ '2020.1' : [ 'DYNAMIC':'dyn'],
+ '2020.2' : [ 'DYNAMIC':'dyn']
]
def sdaccel_example_default_map = [
- '2017.4' : [
- 'Hello_World_1ddr': 'SDAccel/examples/xilinx/getting_started/host/helloworld_ocl',
- 'Gmem_2Banks_2ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/gmem_2banks_ocl',
- 'kernel_3ddr_bandwidth_4ddr': 'SDAccel/examples/aws/kernel_3ddr_bandwidth',
- 'Kernel_Global_Bw_4ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/kernel_global_bandwidth',
+ '2019.1' : [
+ 'Hello_World_1ddr': 'SDAccel/examples/xilinx/getting_started/hello_world/helloworld_ocl',
+ 'Gmem_2Banks_2ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/gmem_2banks_ocl_5.0_shell',
+ 'Kernel_Global_Bw_4ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/kernel_global_bandwidth_5.0_shell',
'RTL_Vadd_Debug': 'SDAccel/examples/xilinx/getting_started/rtl_kernel/rtl_vadd_hw_debug'
+ ]
+]
+
+def vitis_example_default_map = [
+ '2019.2' : [
+ 'Hello_World_1ddr': 'Vitis/examples/xilinx/ocl_kernels/cl_helloworld',
+ 'Gmem_2Banks_2ddr': 'Vitis/examples/xilinx/ocl_kernels/cl_gmem_2banks',
+ 'Kernel_Global_Bw_4ddr': 'Vitis/examples/xilinx/cpp_kernels/kernel_global_bandwidth',
+ 'RTL_Vadd_Debug': 'Vitis/examples/xilinx/rtl_kernels/rtl_vadd_hw_debug'
],
- '2018.2' : [
- 'Hello_World_1ddr': 'SDAccel/examples/xilinx/getting_started/host/helloworld_ocl',
- 'Gmem_2Banks_2ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/gmem_2banks_ocl',
- 'kernel_3ddr_bandwidth_4ddr': 'SDAccel/examples/aws/kernel_3ddr_bandwidth',
- 'Kernel_Global_Bw_4ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/kernel_global_bandwidth',
- 'RTL_Vadd_Debug': 'SDAccel/examples/xilinx/getting_started/rtl_kernel/rtl_vadd_hw_debug'
+ '2020.1' : [
+ 'Hello_World_1ddr': 'Vitis/examples/xilinx/ocl_kernels/cl_helloworld',
+ 'Gmem_2Banks_2ddr': 'Vitis/examples/xilinx/ocl_kernels/cl_gmem_2banks',
+ 'Kernel_Global_Bw_4ddr': 'Vitis/examples/xilinx/cpp_kernels/kernel_global_bandwidth',
+ 'RTL_Vadd_Debug': 'Vitis/examples/xilinx/rtl_kernels/rtl_vadd_hw_debug',
+ 'gemm_blas': 'Vitis/examples/xilinx/library_examples/gemm',
+ 'gzip_app': 'Vitis/examples/xilinx/library_examples/gzip_app'
],
- '2018.3' : [
- 'Hello_World_1ddr': 'SDAccel/examples/xilinx/getting_started/host/helloworld_ocl',
- 'Gmem_2Banks_2ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/gmem_2banks_ocl',
- 'Kernel_Global_Bw_4ddr': 'SDAccel/examples/xilinx/getting_started/kernel_to_gmem/kernel_global_bandwidth',
- 'RTL_Vadd_Debug': 'SDAccel/examples/xilinx/getting_started/rtl_kernel/rtl_vadd_hw_debug'
+ '2020.2' : [
+ 'Hello_World_1ddr': 'Vitis/examples/xilinx/ocl_kernels/cl_helloworld',
+ 'Gmem_2Banks_2ddr': 'Vitis/examples/xilinx/ocl_kernels/cl_gmem_2banks',
+ 'Kernel_Global_Bw_4ddr': 'Vitis/examples/xilinx/cpp_kernels/kernel_global_bandwidth',
+ 'RTL_Vadd_Debug': 'Vitis/examples/xilinx/rtl_kernels/rtl_vadd_hw_debug',
+ 'gemm_blas': 'Vitis/examples/xilinx/library_examples/gemm',
+ 'gzip_app': 'Vitis/examples/xilinx/library_examples/gzip_app'
]
]
def simulator_tool_default_map = [
- '2017.4' : [
- 'vivado': 'xilinx/SDx/2017.4_04112018',
- 'vcs': 'vcs-mx/L-2016.06-1',
- 'questa': 'questa/10.6b',
- 'ies': 'incisive/15.20.063'
- ],
- '2018.2' : [
- 'vivado': 'xilinx/SDx/2018.2_06142018',
- 'vcs': 'vcs-mx/N-2017.12-SP1-1',
+ '2019.1' : [
+ 'vivado': 'xilinx/SDx/2019.1.op2552052',
+ 'vcs': 'synopsys/vcs-mx/N-2017.12-SP2',
'questa': 'questa/10.6c_1',
'ies': 'incisive/15.20.063'
],
- '2018.3' : [
- 'vivado': 'xilinx/SDx/2018.3_1207',
- 'vcs': 'vcs-mx/N-2017.12-SP1-1',
- 'questa': 'questa/10.6c_1',
+ '2019.2' : [
+ 'vivado': 'xilinx/Vivado/2019.2',
+ 'vcs': 'synopsys/vcs-mx/O-2018.09-SP2-1',
+ 'questa': 'questa/2019.2',
'ies': 'incisive/15.20.063'
+ ],
+ '2020.1' : [
+ 'vivado': 'xilinx/Vivado/2020.1',
+ 'vcs': 'synopsys/vcs-mx/P-2019.06-SP1-1',
+ 'questa': 'questa/2019.4',
+ 'ies': 'incisive/15.20.079'
+ ],
+ '2020.2' : [
+ 'vivado': 'xilinx/Vivado/2020.2',
+ 'vcs': 'synopsys/vcs-mx/Q-2020.03',
+ 'questa': 'questa/2020.2',
+ 'ies': 'incisive/15.20.083'
]
]
+// ies 073 is not available for download
// Get serializable entry set
@NonCPS def entrySet(m) {m.collect {k, v -> [key: k, value: v]}}
@@ -196,7 +225,7 @@ def get_task_label(Map args=[ : ]) {
}
if (params.internal_simulations) {
echo "internal simulation agent requested"
- task_label = 'f1'
+ task_label = 'f1_3rd_party_sims'
}
echo "Label Requested: $task_label"
@@ -269,7 +298,7 @@ def test_run_py_bindings() {
try {
sh """
set -e
- source $WORKSPACE/shared/tests/bin/setup_test_sdk_env_al2.sh "py_bindings"
+ source $WORKSPACE/shared/tests/bin/setup_test_sdk_env.sh "py_bindings"
python2.7 -m pytest -v $WORKSPACE/${test} --junit-xml $WORKSPACE/${report_file}
"""
} catch (exc) {
@@ -367,7 +396,7 @@ def test_fpga_all_slots() {
}
catch (exception) {
echo "Test FPGA Tools All Slots failed"
- input message: "1 slot FPGA Tools test failed. Click Proceed or Abort when you are done debugging on the instance."
+ input message: "All slot FPGA Tools test failed. Click Proceed or Abort when you are done debugging on the instance."
throw exception
}
finally {
@@ -395,7 +424,6 @@ def test_run_non_root_access() {
source $WORKSPACE/shared/tests/bin/setup_test_sdk_env.sh
newgrp fpgauser
export SDK_DIR="${WORKSPACE}/sdk"
- source $WORKSPACE/shared/tests/bin/setup_test_env.sh
python2.7 -m pytest -v $WORKSPACE/${test} --junit-xml $WORKSPACE/${report_file}
"""
} catch (exc) {
@@ -492,7 +520,7 @@ if (test_fpga_tools) {
if (test_sims) {
all_tests['Run Sims'] = {
stage('Run Sims') {
- def cl_names = ['cl_uram_example', 'cl_dram_dma', 'cl_hello_world', 'cl_sde']
+ def cl_names = ['cl_vhdl_hello_world', 'cl_uram_example', 'cl_dram_dma', 'cl_hello_world', 'cl_sde']
def simulators = ['vivado']
def sim_nodes = [:]
if(params.internal_simulations) {
@@ -505,7 +533,16 @@ if (test_sims) {
String xilinx_version = y
String cl_name = x
String simulator = z
- String node_name = "Sim ${cl_name} ${xilinx_version}"
+ if((cl_name == 'cl_vhdl_hello_world') && (simulator == 'ies')) {
+ println ("Skipping Simulator: ${simulator} CL: ${cl_name}")
+ continue;
+ }
+ String cl_dir_name = cl_name
+ if(cl_name == 'cl_vhdl_hello_world') {
+ cl_dir_name = "cl_hello_world_vhdl"
+ }
+
+ String node_name = "Sim ${cl_name} ${xilinx_version} ${simulator}"
String key = "test_${cl_name}__"
String report_file = "test_sims_${cl_name}_${xilinx_version}.xml"
def tool_module_map = simulator_tool_default_map.get(xilinx_version)
@@ -525,19 +562,21 @@ if (test_sims) {
sh """
set -e
module purge
- module load python/2.7.9
+ module load python/3.7.2
+ module load python/2.7.14
+ module load batch
module load ${vivado_module}
module load ${vcs_module}
module load ${questa_module}
module load ${ies_module}
source $WORKSPACE/hdk_setup.sh
- python2.7 -m pytest -v $WORKSPACE/hdk/tests/simulation_tests/test_sims.py -k \"${key}\" --junit-xml $WORKSPACE/${report_file} --simulator ${simulator}
+ python2.7 -m pytest -v $WORKSPACE/hdk/tests/simulation_tests/test_sims.py -k \"${key}\" --junit-xml $WORKSPACE/${report_file} --simulator ${simulator} --batch 'TRUE'
"""
} else {
sh """
set -e
source $WORKSPACE/shared/tests/bin/setup_test_hdk_env.sh
- python2.7 -m pytest -v $WORKSPACE/hdk/tests/simulation_tests/test_sims.py -k \"${key}\" --junit-xml $WORKSPACE/${report_file} --simulator ${simulator}
+ python2.7 -m pytest -v $WORKSPACE/hdk/tests/simulation_tests/test_sims.py -k \"${key}\" --junit-xml $WORKSPACE/${report_file} --simulator ${simulator} --batch 'FALSE'
"""
}
} catch (exc) {
@@ -545,7 +584,7 @@ if (test_sims) {
throw exc
} finally {
run_junit(report_file)
- archiveArtifacts artifacts: "hdk/cl/examples/${cl_name}/**/*.sim.log", fingerprint: true
+ archiveArtifacts artifacts: "hdk/cl/examples/${cl_dir_name}/**/*.sim.log", fingerprint: true
}
}
}
@@ -587,15 +626,15 @@ if (test_xdma) {
//=============================================================================
// Python Binding Test
//=============================================================================
-if (test_py_bindings) {
- all_tests['Test Python Bindings'] = {
- stage('Test Python Bindings') {
- node('f1.2xl_runtime_test_al2') {
- test_run_py_bindings()
- }
- }
- }
-}
+// if (test_py_bindings) {
+// all_tests['Test Python Bindings'] = {
+// stage('Test Python Bindings') {
+// node('f1.2xl_runtime_test_al2') {
+// test_run_py_bindings()
+// }
+// }
+// }
+// }
//=============================================================================
// Precompiled Runtime Tests
@@ -861,113 +900,369 @@ if (test_hdk_fdf) {
// SDAccel Tests
//=============================================================================
-if (test_sdaccel_scripts) {
- all_tests['Test SDAccel Scripts'] = {
- stage('Test SDAccel Scripts') {
- def nodes = [:]
- for (def xilinx_version in xilinx_versions) {
+// if (test_sdaccel_scripts) {
+// all_tests['Test SDAccel Scripts'] = {
+// stage('Test SDAccel Scripts') {
+// def nodes = [:]
+// for (def xilinx_version in xilinx_versions) {
+//
+// String node_label = get_task_label(task: 'source_scripts', xilinx_version: xilinx_version)
+// String node_name = "Test SDAccel Scripts ${xilinx_version}"
+// nodes[node_name] = {
+// node(node_label) {
+// String report_file = "test_sdaccel_scripts_${xilinx_version}.xml"
+// checkout scm
+// try {
+// sh """
+// set -e
+// source $WORKSPACE/shared/tests/bin/setup_test_env.sh
+// python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_sdaccel_scripts.py --junit-xml $WORKSPACE/${report_file}
+// """
+// } finally {
+// run_junit(report_file)
+// }
+// }
+// }
+// }
+// parallel nodes
+// }
+// }
+// }
+
+// if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
+// all_tests['Run SDAccel Tests'] = {
+// String sdaccel_examples_list = 'sdaccel_examples_list.json'
+//
+// def sdaccel_all_version_stages = [:]
+//
+// for (def version in xilinx_versions) {
+//
+// String xilinx_version = version
+// String sdaccel_base_stage_name = "SDx FDF $xilinx_version"
+// String sdaccel_find_stage_name = "SDx Find tests $xilinx_version"
+//
+// sdaccel_all_version_stages[sdaccel_base_stage_name] = {
+// stage (sdaccel_find_stage_name) {
+//
+// node(get_task_label(task: 'find_tests', xilinx_version: xilinx_version)) {
+//
+// checkout scm
+// String report_file = "test_find_sdaccel_examples_${xilinx_version}.xml"
+//
+// try {
+// sh """
+// rm -rf ${sdaccel_examples_list}
+// """
+// } catch(error) {
+// // Ignore any errors
+// echo "Failed to clean ${sdaccel_examples_list}"
+// }
+//
+// try {
+// sh """
+// set -e
+// source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
+// python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_find_sdaccel_examples.py --junit-xml $WORKSPACE/${report_file} --xilinxVersion ${xilinx_version}
+// """
+// } catch (exc) {
+// echo "Could not find tests. Please check the repository."
+// throw exc
+// } finally {
+// run_junit(report_file)
+// archiveArtifacts artifacts: "${sdaccel_examples_list}.*", fingerprint: true
+//
+// }
+//
+// // Only run the hello world test by default
+// //def example_map = [ 'Hello_World': 'SDAccel/examples/xilinx/getting_started/host/helloworld_ocl' ]
+// def example_map = sdaccel_example_default_map.get(xilinx_version)
+//
+// // Run all examples when parameter set
+// if (test_all_sdaccel_examples_fdf) {
+// example_map = readJSON file: sdaccel_examples_list
+// }
+//
+// def sdaccel_build_stages = [:]
+//
+// for ( def e in entrySet(example_map) ) {
+//
+// String test_key = e.key
+// def dsa_map_for_version = dsa_map.get(xilinx_version)
+//
+// // dsa = [ 4DDR: 4ddr ]
+// for ( def dsa in entrySet(dsa_map_for_version) ) {
+//
+// String build_name = "SDx ${e.key}_${dsa.value}_${xilinx_version}"
+// String example_path = e.value
+//
+// String dsa_name = dsa.key
+// String dsa_rte_name = dsa.value
+//
+// String sw_emu_stage_name = "SDx SW_EMU ${build_name}"
+// String hw_emu_stage_name = "SDx HW_EMU ${build_name}"
+// String hw_stage_name = "SDx HW ${build_name}"
+// String create_afi_stage_name = "SDx AFI ${build_name}"
+// String run_example_stage_name = "SDx RUN ${build_name}"
+//
+// String sw_emu_report_file = "sdaccel_sw_emu_${e.key}_${dsa.value}_${xilinx_version}.xml"
+// String hw_emu_report_file = "sdaccel_hw_emu_${e.key}_${dsa.value}_${xilinx_version}.xml"
+// String hw_report_file = "sdaccel_hw_${e.key}_${dsa.value}_${xilinx_version}.xml"
+// String create_afi_report_file = "sdaccel_create_afi_${e.key}_${dsa.value}_${xilinx_version}.xml"
+// String run_example_report_file = "sdaccel_run_${e.key}_${dsa.value}_${xilinx_version}.xml"
+//
+// String description_file = "${example_path}/description.json"
+// def description_json = ["targets":["hw","hw_emu","sw_emu"]]
+//
+// try {
+// description_json = readJSON file: description_file
+// }
+// catch (exc) {
+// echo "Could not read the file: ${description_file}"
+// throw exc
+// }
+//
+// boolean test_sw_emu_supported = true
+// boolean test_hw_emu_supported = true
+//
+// if(description_json["targets"]) {
+// if(description_json["targets"].contains("sw_emu")) {
+// test_sw_emu_supported = true
+// echo "Description file ${description_file} has target sw_emu"
+// } else {
+// test_sw_emu_supported = false
+// echo "Description file ${description_file} does not have target sw_emu"
+// }
+// if(description_json["targets"].contains("hw_emu")) {
+// test_hw_emu_supported = true
+// echo "Description file ${description_file} has target sw_emu"
+// } else {
+// test_hw_emu_supported = false
+// echo "Description file ${description_file} does not have target sw_emu"
+// }
+// } else {
+// echo "Description json did not have a 'target' key"
+// }
+//
+// sdaccel_build_stages[build_name] = {
+// if(test_sw_emu_supported) {
+// stage(sw_emu_stage_name) {
+// node(get_task_label(task: 'sdaccel_builds', xilinx_version: xilinx_version)) {
+// checkout scm
+// try {
+// sh """
+// set -e
+// source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
+// export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
+// python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_build_sdaccel_example.py::TestBuildSDAccelExample::test_sw_emu --examplePath ${example_path} --junit-xml $WORKSPACE/${sw_emu_report_file} --timeout=14400 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+// """
+// } catch (error) {
+// echo "${sw_emu_stage_name} SW EMU Build generation failed"
+// archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
+// throw error
+// } finally {
+// run_junit(sw_emu_report_file)
+// git_cleanup()
+// }
+// }
+// }
+// }
+//
+// if(test_hw_emu_supported) {
+// stage(hw_emu_stage_name) {
+// node(get_task_label(task: 'sdaccel_builds', xilinx_version: xilinx_version)) {
+// checkout scm
+// try {
+// sh """
+// set -e
+// source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
+// export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
+// python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_build_sdaccel_example.py::TestBuildSDAccelExample::test_hw_emu --examplePath ${example_path} --junit-xml $WORKSPACE/${hw_emu_report_file} --timeout=21600 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+// """
+// } catch (error) {
+// echo "${hw_emu_stage_name} HW EMU Build generation failed"
+// archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
+// throw error
+// } finally {
+// run_junit(hw_emu_report_file)
+// git_cleanup()
+// }
+// }
+// }
+// }
+//
+// stage(hw_stage_name) {
+// node(get_task_label(task: 'sdaccel_builds', xilinx_version: xilinx_version)) {
+// checkout scm
+// try {
+// sh """
+// set -e
+// source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
+// export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
+// python2.7 -m pytest -s -v $WORKSPACE/SDAccel/tests/test_build_sdaccel_example.py::TestBuildSDAccelExample::test_hw_build --examplePath ${example_path} --junit-xml $WORKSPACE/${hw_report_file} --timeout=36000 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+// """
+// } catch (error) {
+// echo "${hw_stage_name} HW Build generation failed"
+// archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
+// throw error
+// } finally {
+// run_junit(hw_report_file)
+// git_cleanup()
+// }
+// }
+// }
+//
+// stage(create_afi_stage_name) {
+// node(get_task_label(task: 'create_afi', xilinx_version: xilinx_version)) {
+//
+// checkout scm
+// try {
+// sh """
+// set -e
+// source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
+// export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
+// python2.7 -m pytest -s -v $WORKSPACE/SDAccel/tests/test_create_sdaccel_afi.py::TestCreateSDAccelAfi::test_create_sdaccel_afi --examplePath ${example_path} --junit-xml $WORKSPACE/${create_afi_report_file} --timeout=18000 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+// """
+// } catch (error) {
+// echo "${create_afi_stage_name} Create AFI failed"
+// archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
+// throw error
+// } finally {
+//
+// String to_aws_dir = "${example_path}/to_aws"
+//
+// if (fileExists(to_aws_dir)) {
+// sh "rm -rf ${to_aws_dir}"
+// }
+// run_junit(create_afi_report_file)
+// git_cleanup()
+// }
+// }
+// }
+//
+// stage(run_example_stage_name) {
+//
+// if(disable_runtime_tests) {
+// echo "Runtime tests disabled. Not running ${run_example_stage_name}"
+// } else {
+// node(get_task_label(task: 'runtime', xilinx_version: xilinx_version)) {
+//
+// checkout scm
+// try {
+// sh """
+// set -e
+// source $WORKSPACE/shared/tests/bin/setup_test_runtime_sdaccel_env.sh
+// export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
+// python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_run_sdaccel_example.py::TestRunSDAccelExample::test_run_sdaccel_example --examplePath ${example_path} --junit-xml $WORKSPACE/${run_example_report_file} --timeout=14400 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+// """
+// } catch (error) {
+// echo "${run_example_stage_name} Runtime example failed"
+// archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
+// input message: "SDAccel Runtime test failed. Click Proceed or Abort when you are done debugging on the instance."
+// throw error
+// } finally {
+// run_junit(run_example_report_file)
+// git_cleanup()
+// }
+// }
+// } //else
+//
+// }
+//
+// } // sdaccel_build_stages[ e.key ]
+//
+// } //for ( def dsa in entrySet(dsa_map_for_version) ) {
+// } // for ( e in list_map )
+//
+// parallel sdaccel_build_stages
+// }
+// }
+// }
+// } //for (def xilinx_version in xilinx_versions) {
+// parallel sdaccel_all_version_stages
+// }
+// }
- String node_label = get_task_label(task: 'source_scripts', xilinx_version: xilinx_version)
- String node_name = "Test SDAccel Scripts ${xilinx_version}"
- nodes[node_name] = {
- node(node_label) {
- String report_file = "test_sdaccel_scripts_${xilinx_version}.xml"
- checkout scm
- try {
- sh """
- set -e
- source $WORKSPACE/shared/tests/bin/setup_test_env.sh
- python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_sdaccel_scripts.py --junit-xml $WORKSPACE/${report_file}
- """
- } finally {
- run_junit(report_file)
- }
- }
- }
- }
- parallel nodes
- }
- }
-}
-
-if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
- all_tests['Run SDAccel Tests'] = {
- String sdaccel_examples_list = 'sdaccel_examples_list.json'
+//=============================================================================
+// Vitis Tests
+//=============================================================================
+if (test_helloworld_vitis_example_fdf || test_all_vitis_examples_fdf) {
+ all_tests['Run Vitis Tests'] = {
+ String vitis_examples_list = 'vitis_examples_list.json'
- def sdaccel_all_version_stages = [:]
+ def vitis_all_version_stages = [:]
- for (def version in xilinx_versions) {
+ for (def version in vitis_versions) {
String xilinx_version = version
- String sdaccel_base_stage_name = "SDx FDF $xilinx_version"
- String sdaccel_find_stage_name = "SDx Find tests $xilinx_version"
+ String vitis_base_stage_name = "Vitis FDF $xilinx_version"
+ String vitis_find_stage_name = "Vitis Find tests $xilinx_version"
- sdaccel_all_version_stages[sdaccel_base_stage_name] = {
- stage (sdaccel_find_stage_name) {
+ vitis_all_version_stages[vitis_base_stage_name] = {
+ stage (vitis_find_stage_name) {
node(get_task_label(task: 'find_tests', xilinx_version: xilinx_version)) {
checkout scm
- String report_file = "test_find_sdaccel_examples_${xilinx_version}.xml"
+ String report_file = "test_find_vitis_examples_${xilinx_version}.xml"
try {
sh """
- rm -rf ${sdaccel_examples_list}
+ rm -rf ${vitis_examples_list}
"""
} catch(error) {
// Ignore any errors
- echo "Failed to clean ${sdaccel_examples_list}"
+ echo "Failed to clean ${vitis_examples_list}"
}
try {
sh """
set -e
- source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
- python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_find_sdaccel_examples.py --junit-xml $WORKSPACE/${report_file}
+ source $WORKSPACE/shared/tests/bin/setup_test_build_vitis_env.sh
+ python2.7 -m pytest -v $WORKSPACE/Vitis/tests/test_find_vitis_examples.py --junit-xml $WORKSPACE/${report_file} --xilinxVersion ${xilinx_version}
"""
} catch (exc) {
echo "Could not find tests. Please check the repository."
throw exc
} finally {
run_junit(report_file)
+ archiveArtifacts artifacts: "${vitis_examples_list}.*", fingerprint: true
+
}
- // Only run the hello world test by default
- //def example_map = [ 'Hello_World': 'SDAccel/examples/xilinx/getting_started/host/helloworld_ocl' ]
- def example_map = sdaccel_example_default_map.get(xilinx_version)
+ def example_map = vitis_example_default_map.get(xilinx_version)
// Run all examples when parameter set
- if (test_all_sdaccel_examples_fdf) {
- example_map = readJSON file: sdaccel_examples_list
+ if (test_all_vitis_examples_fdf) {
+ example_map = readJSON file: vitis_examples_list
}
- def sdaccel_build_stages = [:]
+ def vitis_build_stages = [:]
for ( def e in entrySet(example_map) ) {
String test_key = e.key
- def dsa_map_for_version = dsa_map.get(xilinx_version)
+ def xsa_map_for_version = xsa_map.get(xilinx_version)
// dsa = [ 4DDR: 4ddr ]
- for ( def dsa in entrySet(dsa_map_for_version) ) {
+ for ( def dsa in entrySet(xsa_map_for_version) ) {
- String build_name = "SDx ${e.key}_${dsa.value}_${xilinx_version}"
+ String build_name = "Vitis ${e.key}_${dsa.value}_${xilinx_version}"
String example_path = e.value
String dsa_name = dsa.key
String dsa_rte_name = dsa.value
- String sw_emu_stage_name = "SDx SW_EMU ${build_name}"
- String hw_emu_stage_name = "SDx HW_EMU ${build_name}"
- String hw_stage_name = "SDx HW ${build_name}"
- String create_afi_stage_name = "SDx AFI ${build_name}"
- String run_example_stage_name = "SDx RUN ${build_name}"
+ String sw_emu_stage_name = "Vitis SW_EMU ${build_name}"
+ String hw_emu_stage_name = "Vitis HW_EMU ${build_name}"
+ String hw_stage_name = "Vitis HW ${build_name}"
+ String create_afi_stage_name = "Vitis AFI ${build_name}"
+ String run_example_stage_name = "Vitis RUN ${build_name}"
- String sw_emu_report_file = "sdaccel_sw_emu_${e.key}_${dsa.value}_${xilinx_version}.xml"
- String hw_emu_report_file = "sdaccel_hw_emu_${e.key}_${dsa.value}_${xilinx_version}.xml"
- String hw_report_file = "sdaccel_hw_${e.key}_${dsa.value}_${xilinx_version}.xml"
- String create_afi_report_file = "sdaccel_create_afi_${e.key}_${dsa.value}_${xilinx_version}.xml"
- String run_example_report_file = "sdaccel_run_${e.key}_${dsa.value}_${xilinx_version}.xml"
+ String sw_emu_report_file = "vitis_sw_emu_${e.key}_${dsa.value}_${xilinx_version}.xml"
+ String hw_emu_report_file = "vitis_hw_emu_${e.key}_${dsa.value}_${xilinx_version}.xml"
+ String hw_report_file = "vitis_hw_${e.key}_${dsa.value}_${xilinx_version}.xml"
+ String create_afi_report_file = "vitis_create_afi_${e.key}_${dsa.value}_${xilinx_version}.xml"
+ String run_example_report_file = "vitis_run_${e.key}_${dsa.value}_${xilinx_version}.xml"
String description_file = "${example_path}/description.json"
def description_json = ["targets":["hw","hw_emu","sw_emu"]]
@@ -980,7 +1275,8 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
throw exc
}
- boolean test_sw_emu_supported = true
+ boolean test_sw_emu_supported = false
+ boolean test_hw_emu_supported = false
if(description_json["targets"]) {
if(description_json["targets"].contains("sw_emu")) {
@@ -990,11 +1286,21 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
test_sw_emu_supported = false
echo "Description file ${description_file} does not have target sw_emu"
}
+ if(description_json["targets"].contains("hw_emu")) {
+ test_hw_emu_supported = true
+ echo "Description file ${description_file} has target hw_emu"
+ } else {
+ test_hw_emu_supported = false
+ echo "Description file ${description_file} does not have target hw_emu"
+ }
} else {
echo "Description json did not have a 'target' key"
+ test_sw_emu_supported = true
+ test_hw_emu_supported = true
}
- sdaccel_build_stages[build_name] = {
+ vitis_build_stages[build_name] = {
+
if(test_sw_emu_supported) {
stage(sw_emu_stage_name) {
node(get_task_label(task: 'sdaccel_builds', xilinx_version: xilinx_version)) {
@@ -1002,9 +1308,8 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
try {
sh """
set -e
- source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
- export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
- python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_build_sdaccel_example.py::TestBuildSDAccelExample::test_sw_emu --examplePath ${example_path} --junit-xml $WORKSPACE/${sw_emu_report_file} --timeout=14400 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+ source $WORKSPACE/shared/tests/bin/setup_test_build_vitis_env.sh
+ python2.7 -m pytest -v $WORKSPACE/Vitis/tests/test_build_vitis_example.py::TestBuildVitisExample::test_sw_emu --examplePath ${example_path} --junit-xml $WORKSPACE/${sw_emu_report_file} --timeout=14400 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
"""
} catch (error) {
echo "${sw_emu_stage_name} SW EMU Build generation failed"
@@ -1018,23 +1323,24 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
}
}
- stage(hw_emu_stage_name) {
- node(get_task_label(task: 'sdaccel_builds', xilinx_version: xilinx_version)) {
- checkout scm
- try {
- sh """
- set -e
- source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
- export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
- python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_build_sdaccel_example.py::TestBuildSDAccelExample::test_hw_emu --examplePath ${example_path} --junit-xml $WORKSPACE/${hw_emu_report_file} --timeout=21600 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
- """
- } catch (error) {
- echo "${hw_emu_stage_name} HW EMU Build generation failed"
- archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
- throw error
- } finally {
- run_junit(hw_emu_report_file)
- git_cleanup()
+ if(test_hw_emu_supported) {
+ stage(hw_emu_stage_name) {
+ node(get_task_label(task: 'sdaccel_builds', xilinx_version: xilinx_version)) {
+ checkout scm
+ try {
+ sh """
+ set -e
+ source $WORKSPACE/shared/tests/bin/setup_test_build_vitis_env.sh
+ python2.7 -m pytest -v $WORKSPACE/Vitis/tests/test_build_vitis_example.py::TestBuildVitisExample::test_hw_emu --examplePath ${example_path} --junit-xml $WORKSPACE/${hw_emu_report_file} --timeout=21600 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+ """
+ } catch (error) {
+ echo "${hw_emu_stage_name} HW EMU Build generation failed"
+ archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
+ throw error
+ } finally {
+ run_junit(hw_emu_report_file)
+ git_cleanup()
+ }
}
}
}
@@ -1045,9 +1351,8 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
try {
sh """
set -e
- source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
- export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
- python2.7 -m pytest -s -v $WORKSPACE/SDAccel/tests/test_build_sdaccel_example.py::TestBuildSDAccelExample::test_hw_build --examplePath ${example_path} --junit-xml $WORKSPACE/${hw_report_file} --timeout=36000 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+ source $WORKSPACE/shared/tests/bin/setup_test_build_vitis_env.sh
+ python2.7 -m pytest -s -v $WORKSPACE/Vitis/tests/test_build_vitis_example.py::TestBuildVitisExample::test_hw_build --examplePath ${example_path} --junit-xml $WORKSPACE/${hw_report_file} --timeout=36000 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
"""
} catch (error) {
echo "${hw_stage_name} HW Build generation failed"
@@ -1067,9 +1372,8 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
try {
sh """
set -e
- source $WORKSPACE/shared/tests/bin/setup_test_build_sdaccel_env.sh
- export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
- python2.7 -m pytest -s -v $WORKSPACE/SDAccel/tests/test_create_sdaccel_afi.py::TestCreateSDAccelAfi::test_create_sdaccel_afi --examplePath ${example_path} --junit-xml $WORKSPACE/${create_afi_report_file} --timeout=18000 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+ source $WORKSPACE/shared/tests/bin/setup_test_build_vitis_env.sh
+ python2.7 -m pytest -s -v $WORKSPACE/Vitis/tests/test_create_vitis_afi.py::TestCreateVitisAfi::test_create_vitis_afi --examplePath ${example_path} --junit-xml $WORKSPACE/${create_afi_report_file} --timeout=18000 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
"""
} catch (error) {
echo "${create_afi_stage_name} Create AFI failed"
@@ -1099,14 +1403,13 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
try {
sh """
set -e
- source $WORKSPACE/shared/tests/bin/setup_test_runtime_sdaccel_env.sh
- export AWS_PLATFORM=\$AWS_PLATFORM_${dsa_name}
- python2.7 -m pytest -v $WORKSPACE/SDAccel/tests/test_run_sdaccel_example.py::TestRunSDAccelExample::test_run_sdaccel_example --examplePath ${example_path} --junit-xml $WORKSPACE/${run_example_report_file} --timeout=14400 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
+ source $WORKSPACE/shared/tests/bin/setup_test_runtime_vitis_env.sh
+ python2.7 -m pytest -v $WORKSPACE/Vitis/tests/test_run_vitis_example.py::TestRunVitisExample::test_run_vitis_example --examplePath ${example_path} --junit-xml $WORKSPACE/${run_example_report_file} --timeout=14400 --rteName ${dsa_rte_name} --xilinxVersion ${xilinx_version}
"""
} catch (error) {
echo "${run_example_stage_name} Runtime example failed"
archiveArtifacts artifacts: "${example_path}/**", fingerprint: true
- input message: "SDAccel Runtime test failed. Click Proceed or Abort when you are done debugging on the instance."
+ input message: "Vitis Runtime test failed. Click Proceed or Abort when you are done debugging on the instance."
throw error
} finally {
run_junit(run_example_report_file)
@@ -1117,20 +1420,19 @@ if (test_helloworld_sdaccel_example_fdf || test_all_sdaccel_examples_fdf) {
}
- } // sdaccel_build_stages[ e.key ]
+ } // vitis_build_stages[ e.key ]
- } //for ( def dsa in entrySet(dsa_map_for_version) ) {
+ } //for ( def dsa in entrySet(xsa_map_for_version) ) {
} // for ( e in list_map )
- parallel sdaccel_build_stages
+ parallel vitis_build_stages
}
}
}
} //for (def xilinx_version in xilinx_versions) {
- parallel sdaccel_all_version_stages
+ parallel vitis_all_version_stages
}
}
-
//=============================================================================
// SDK Tests
//=============================================================================
diff --git a/Jenkinsfile_int_sims b/Jenkinsfile_int_sims
new file mode 100644
index 00000000..51a2d46f
--- /dev/null
+++ b/Jenkinsfile_int_sims
@@ -0,0 +1,226 @@
+#!/usr/bin/env groovy
+
+//=============================================================================
+// Pipeline parameters
+//=============================================================================
+properties([parameters([
+ string(name: 'branch', defaultValue: ''),
+ booleanParam(name: 'test_sims', defaultValue: true, description: 'Run all Simulations'),
+ booleanParam(name: 'internal_simulations', defaultValue: true, description: 'This option asks for default agent from Jenkins')
+])])
+
+//=============================================================================
+// Configuration
+//=============================================================================
+
+boolean test_sims = params.get('test_sims')
+
+//=============================================================================
+// Globals
+//=============================================================================
+
+// Map that contains stages of tests
+def all_tests = [:]
+
+// Task to Label map
+task_label = [
+ 'create_afi': 't2.l_50',
+ 'simulation': 'z1d.l',
+ 'dcp_gen': 'z1d.2xl',
+ 'runtime': 'f1.2xl',
+ 'runtime_all_slots': 'f1.16xl',
+ 'source_scripts': 'c4.xl',
+ 'md_links': 'c4.xl',
+ 'find_tests': 't2.l_50',
+ 'sdaccel_builds': 'z1d.2xl'
+]
+
+// Put the latest version last
+def xilinx_versions = [ '2020.2' ]
+
+// We want the default to be the latest.
+def default_xilinx_version = xilinx_versions.last()
+
+def simulator_tool_default_map = [
+ '2019.2' : [
+ 'vivado': 'xilinx/Vivado/2019.2',
+ 'vcs': 'synopsys/vcs-mx/O-2018.09-SP2-1',
+ 'questa': 'questa/2019.2',
+ 'ies': 'incisive/15.20.063'
+ ],
+ '2020.1' : [
+ 'vivado': 'xilinx/Vivado/2020.1',
+ 'vcs': 'synopsys/vcs-mx/P-2019.06-SP1-1',
+ 'questa': 'questa/2019.4',
+ 'ies': 'incisive/15.20.079'
+ ],
+ '2020.2' : [
+ 'vivado': 'xilinx/Vivado/2020.2',
+ 'vcs': 'synopsys/vcs/Q-2020.03',
+ 'questa': 'questa/2019.4_3',
+ 'ies': 'incisive/15.20.083'
+ ]
+]
+
+
+// Get serializable entry set
+@NonCPS def entrySet(m) {m.collect {k, v -> [key: k, value: v]}}
+
+@NonCPS
+def is_public_repo() {
+ echo "Change URL: ${env.CHANGE_URL}"
+ return (env.CHANGE_URL =~ /^(\S+)?aws-fpga\/pull\/(\d+)$/)
+}
+
+def get_task_label(Map args=[ : ]) {
+ String task_label = args.xilinx_version + '_' + task_label[args.task]
+
+ if (params.internal_simulations) {
+ echo "internal simulation agent requested"
+ task_label = 'f1_3rd_party_sims'
+ }
+
+ echo "Label Requested: $task_label"
+ return task_label
+}
+
+def abort_previous_running_builds() {
+ def hi = Hudson.instance
+ def pname = env.JOB_NAME.split('/')[0]
+
+ hi.getItem(pname).getItem(env.JOB_BASE_NAME).getBuilds().each{ build ->
+ def executor = build.getExecutor()
+
+ if (build.number != currentBuild.number && build.number < currentBuild.number && executor != null) {
+ executor.interrupt(
+ Result.ABORTED,
+ new CauseOfInterruption.UserInterruption("Aborted by #${currentBuild.number}"))
+ println("Aborted previous running build #${build.number}")
+ } else {
+ println("Build is not running or is current build, not aborting - #${build.number}")
+ }
+ }
+}
+
+// Wait for input if we are running on a public repo to avoid malicious PRS
+if (is_public_repo()) {
+ input "Running on a public repository, do you want to proceed with running the tests?"
+} else {
+ echo "Running on a private repository"
+}
+
+
+//Abort previous builds on PR when we push new commits
+// env.CHANGE_ID is only available on PR's and not on branch builds
+if (env.CHANGE_ID) {
+ abort_previous_running_builds()
+}
+
+
+def run_junit(String report_file) {
+
+ if (fileExists(report_file)) {
+ junit healthScaleFactor: 10.0, testResults: report_file
+ } else {
+ echo "Pytest wasn't run for stage. Report file not generated: ${report_file}"
+ }
+}
+
+def git_cleanup() {
+ sh """
+ set -e
+ sudo git reset --hard
+ sudo git clean -fdx
+ """
+}
+
+//=============================================================================
+// Simulations
+//=============================================================================
+if (test_sims) {
+ all_tests['Run Sims'] = {
+ stage('Run Sims') {
+ def cl_names = ['cl_vhdl_hello_world', 'cl_uram_example', 'cl_dram_dma', 'cl_hello_world', 'cl_sde']
+ def simulators = ['vivado']
+ def sim_nodes = [:]
+ if(params.internal_simulations) {
+ simulators = ['vcs', 'ies', 'questa', 'vivado']
+ }
+
+ for (x in cl_names) {
+ for (y in xilinx_versions) {
+ for (z in simulators) {
+ String xilinx_version = y
+ String cl_name = x
+ String simulator = z
+ if((cl_name == 'cl_vhdl_hello_world') && (simulator == 'ies')) {
+ println ("Skipping Simulator: ${simulator} CL: ${cl_name}")
+ continue;
+ }
+ String cl_dir_name = cl_name
+ if(cl_name == 'cl_vhdl_hello_world') {
+ cl_dir_name = "cl_hello_world_vhdl"
+ }
+ String node_name = "Sim ${cl_name} ${xilinx_version} ${simulator}"
+ String key = "test_${cl_name}__"
+ String report_file = "test_sims_${cl_name}_${xilinx_version}.xml"
+ def tool_module_map = simulator_tool_default_map.get(xilinx_version)
+ String vcs_module = tool_module_map.get('vcs')
+ String questa_module = tool_module_map.get('questa')
+ String ies_module = tool_module_map.get('ies')
+ String vivado_module = tool_module_map.get('vivado')
+
+ if(params.internal_simulations) {
+ report_file = "test_sims_${cl_name}_${xilinx_version}_${simulator}.xml"
+ }
+ sim_nodes[node_name] = {
+ node(get_task_label(task: 'simulation', xilinx_version: xilinx_version)) {
+ checkout scm
+ try {
+ if(params.internal_simulations) {
+ sh """
+ set -e
+ module purge
+ module load python/3.7.2
+ module load python/2.7.14
+ module load slurm
+ module load ${vivado_module}
+ module load ${vcs_module}
+ module load ${questa_module}
+ module load ${ies_module}
+ source $WORKSPACE/hdk_setup.sh
+ python2.7 -m pytest -v $WORKSPACE/hdk/tests/simulation_tests/test_sims.py -k \"${key}\" --junit-xml $WORKSPACE/${report_file} --simulator ${simulator} --batch 'TRUE'
+ """
+ } else {
+ sh """
+ set -e
+ source $WORKSPACE/shared/tests/bin/setup_test_hdk_env.sh
+ python2.7 -m pytest -v $WORKSPACE/hdk/tests/simulation_tests/test_sims.py -k \"${key}\" --junit-xml $WORKSPACE/${report_file} --simulator ${simulator} --batch 'FALSE'
+ """
+ }
+ } catch (exc) {
+ echo "${node_name} failed"
+ throw exc
+ } finally {
+ run_junit(report_file)
+ archiveArtifacts artifacts: "hdk/cl/examples/${cl_dir_name}/**/*.sim.log", fingerprint: true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ parallel sim_nodes
+ }
+ }
+}
+
+
+//=============================================================================
+// SDK Tests
+//=============================================================================
+
+
+// Run the tests here
+parallel all_tests
diff --git a/README.md b/README.md
index c1a7f432..179a835a 100644
--- a/README.md
+++ b/README.md
@@ -8,192 +8,199 @@ Below is the standard aws-fpga documentation from upstream.
# Table of Contents
-1. [Overview of AWS EC2 FPGA Development Kit](#overviewdevkit)
- - [Development environments](#overviewdevenv)
- - [Runtime environments](#overviewrunenv)
- - [Example applications](#overviewexapps)
- - [Development tools](#overviewdevtools)
-2. [Getting Started](#gettingstarted)
-3. [FPGA Developer AMI available on AWS Marketplace](#devAmi)
-4. [FPGA Hardware Development Kit (HDK)](#fpgahdk)
-5. [FPGA Software Development Kit (SDK)](#fpgasdk)
-6. [OpenCL Development Environment with Amazon EC2 F1 FPGA Instances to accelerate your C/C++ applications](#sdaccel)
-7. [Developer Support](#devSupport)
-8. [Recommended Documentation](#doccontents)
-9. [Github tips and tricks](#githubtipstricks)
-
-
-
+1. [Overview of AWS EC2 FPGA Development Kit](#overview-of-aws-ec2-fpga-development-kit)
+ - [Development Flow](#development-flow)
+ - [Development environments](#development-environments)
+ - [FPGA Developer AMI](#fpga-developer-ami)
+ - [FPGA Hardware Development Kit (HDK)](#hardware-development-kit-hdk)
+ - [FPGA Software Development Kit (SDK)](#runtime-tools-sdk)
+ - [Software Defined Development Environment](#software-defined-development-environment)
+1. [Amazon EC2 F1 platform features](#amazon-ec2-f1-platform-features)
+1. [Getting Started](#getting-started)
+ - [Getting Familiar with AWS](#getting-familiar-with-aws)
+ - [First time setup](#setting-up-development-environment-for-the-first-time)
+ - [Quickstarts](#quickstarts)
+ - [How To's](#how-tos)
+1. [Documentation Overview](#documentation-overview)
+1. [Developer Support](#developer-support)
+
# Overview of AWS EC2 FPGA Development Kit
-The AWS EC2 FPGA Development Kit is provided by AWS to support development and runtime on [AWS FPGA instances](https://aws.amazon.com/ec2/instance-types/f1/). Amazon EC2 FPGA instances are high-performance compute instances with field programmable gate arrays (FPGAs) that are programmed to create custom hardware accelerations in EC2. F1 instances are easy to program and AWS provides everything needed to develop, simulate, debug, compile and run hardware accelerated applications. Using the [FPGA developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ), developers create an FPGA design. Once the FPGA design (also called CL - Custom logic) is complete, developers create the Amazon FPGA Image (AFI), and easily deploy it to the F1 instance. AFIs are reusable, shareable and can be deployed in a scalable and secure way.
-
+AWS EC2 FPGA Development Kit is a set of development and runtime tools to develop, simulate, debug, compile and run hardware accelerated applications on [Amazon EC2 F1 instances](https://aws.amazon.com/ec2/instance-types/f1/).
+It is distributed between this GitHub repository and the FPGA Developer AMI - [Centos](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ)/[AL2](https://aws.amazon.com/marketplace/pp/B08NTMMZ7X) provided by AWS at no cost for the development tools.
-
-## Overview of Development Environments
-
-| Development Environment | Description | Accelerator Language | Development Tool | Debug Options| Typical Developer / FPGA Experience |
-| --------|---------|---------|-------|-------|-------|
-| [Software Defined Accelerator Development - SDAccel](SDAccel/README.md) | Development experience leverages an optimized compiler to allow easy new accelerator development or migration of existing C/C++/openCL, Verilog/VHDL to AWS FPGA instances | C/C++/OpenCL, Verilog/VHDL (RTL) | SDx/Vivado (GUI or scipt) | SW/HW Emulation, Simulation, GDB, Virtual JTAG (Chipscope) | SW or HW Developer with zero FPGA experience |
-| [Hardware Accelerator Development - HDK](hdk/README.md) | Fully custom hardware development experience provides hardware developers with the tools required for developing AFIs for AWS FPGA instances | Verilog/VHDL | Vivado | Simulation, Virtual JTAG | HW Developer with advanced FPGA experience |
-| [IP Integrator or High Level Synthesis (HLx)](hdk/docs/IPI_GUI_Vivado_Setup.md) | Graphical interface development experience for integrating IP and high level synthesis development | Verilog/VHDL/C | Vivado (GUI) | Simulation, Virtual JTAG | HW Developer with intermediate FPGA experience |
-
-
-## Overview of Runtime Environments
-
-| Runtime Environment | Hardware Interface | Host Code Language | FPGA Tools |
-| --------|---------|---------|-------|
-| [C/C++ Software Defined Accelerator Development](SDAccel/README.md) | OpenCL APIs, [XOCL Driver](./sdk/linux_kernel_drivers/xocl), [HAL](SDAccel/userspace/src2) | C/C++ | [SDK](./sdk), SDx |
-| [Hardware Accelerator Development](hdk/README.md) | [XDMA Driver](sdk/linux_kernel_drivers/xdma/README.md), [peek/poke](sdk/userspace/README.md) | C/C++ | [SDK](./sdk), Vivado |
-| [IP Integrator or High Level Synthesis (HLx)](hdk/docs/IPI_GUI_Vivado_Setup.md) | [XDMA Driver](sdk/linux_kernel_drivers/xdma/README.md), [peek/poke](sdk/userspace/README.md) | C/C++ | [SDK](./sdk), Vivado |
-
-
-## Overview of Development Tools
-
-| Tool | Development/Runtime | Tool location | Description |
-| --------|---------|---------|---------|
-| SDx 2017.4 & 2018.2 | Development | [FPGA developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) | Used for [Software Defined Accelerator Development](SDAccel/README.md) |
-| Vivado 2017.4 & 2018.2 | Development | [FPGA developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) | Used for [Hardware Accelerator Development](hdk/README.md) |
-| FPGA AFI Management Tools | Runtime | [SDK - fpga\_mgmt\_tools](sdk/userspace/fpga_mgmt_tools) | Command-line tools used for FPGA management while running on the F1 instance |
-| Virtual JTAG | Development (Debug) | [FPGA developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) | Runtime debug waveform |
-| wait\_for\_afi | Development | [wait\_for\_afi.py](shared/bin/scripts/wait_for_afi.py) | Helper script that notifies via email on AFI generation completion |
-| notify\_via\_sns | Development | [notify\_via\_sns.py](shared/bin/scripts/notify_via_sns.py) | Notifies developer when design build process completes |
-| AFI Administration | Development | [Copy](hdk/docs/copy_fpga_image.md), [Delete](hdk/docs/delete_fpga_image.md), [Describe](hdk/docs/describe_fpga_images.md), [Attributes](hdk/docs/fpga_image_attributes.md) | AWS CLI EC2 commands for managing your AFIs |
-
-
-NOTE: For on-premises development, SDx/Vivado must have the correct license and use one of the [supported versions of SDx/Vivado](./supported_vivado_versions.txt). The FPGA HDK+SDK [Release Notes](./RELEASE_NOTES.md) may contain additional information. The following links have more information on on-premises development: [Vivado requirements](hdk/docs/on_premise_licensing_help.md) and [SDx requirements](SDAccel/docs/On_Premises_Development_Steps.md)
-
-
-## Overview of Example Applications
-| Accelerator Application | Example | Development Environment | Description |
-| --------|---------|---------|-------|
-| Custom hardware | [cl\_hello\_world](hdk/cl/examples/cl_hello_world) | HDK - RTL (Verilog) | Simple [getting started example](hdk/README.md) with minimal hardware |
-| Custom hardware | [cl\_dram\_dma](hdk/cl/examples/cl_dram_dma) | HDK - RTL (Verilog) | Demonstrates CL connectivity to the F1 shell and connectivity to/from all DDRs |
-| Custom hardware IP integration example using a GUI | [cl\_dram\_dma\_hlx](hdk/cl/examples/cl_dram_dma_hlx) | HLx - Verilog | Demonstrates CL connectivity to the F1 shell and connectivity to/from DRAM using the Vivado IP Integrator GUI |
-| Virtual Ethernet Application | [Example Application](sdk/apps/virtual-ethernet) | [HDK SDE Example](hdk/cl/examples/cl_sde) | The Virtual Ethernet framework facilitates streaming Ethernet frames from a network interface (or any source) into the FPGA for processing and back out to some destination. Possible use cases for this include deep packet inspection, software defined networking, stream encryption or compression, and more. |
-| Pipelined Workload Applications | [cl\_dram\_dma\_data\_retention](hdk/docs/data_retention.md)| [HDK](hdk/cl/examples/cl_dram_dma/software/runtime/test_dram_dma_retention.c) [SDAccel](SDAccel/examples/aws/data_retention) | Demonstrates how to preserve data in DRAMs while swapping out accelerators. Applications that use a temporal accelerator pipeline can take advantage of this feature to reduce latency between FPGA image swaps |
-| Digital Up-Converter using High Level Synthesis | [cl\_hls\_dds\_hlx](hdk/cl/examples/cl_hls_dds_hlx) | HLx - C-to-RTL | Demonstrates an example application written in C that is synthesized to RTL (Verilog) |
-| Security | [AES, RSA, SHA1](https://github.com/Xilinx/SDAccel_Examples/tree/2018.2/security) | SDAccel - C/C++/OpenCL | Developed using software defined acceleration, this example demonstrates methods of using hardware acceleration to speed up security software algorithms |
-| Computer Vision | [Affine, Convolve, Huffman, IDCT](https://github.com/Xilinx/SDAccel_Examples/tree/master/vision) | SDAccel - C/C++/OpenCL | Developed using software defined acceleration, this example demonstrates methods of using hardware acceleration to speed up image detection algorithms |
-| Misc Algorithms | [Kmeans, SmithWaterman, MatrixMult](https://github.com/Xilinx/SDAccel_Examples/tree/master/acceleration) | SDAccel - C/C++/OpenCL | Developed using software defined acceleration, this example demonstrates methods of applying hardware acceleration to a variety of sorting and search algorithms |
-| Financial | [Blacksholes, Heston](https://github.com/KitAway/FinancialModels_AmazonF1) | SDAccel - C/C++/OpenCL | Developed using software defined acceleration, this example demonstrates methods of using hardware acceleration on Monte Carlo financial models |
-| Custom Hardware with Software Defined Acceleration | [RTL Kernels](https://github.com/Xilinx/SDAccel_Examples/tree/master/getting_started/rtl_kernel) | SDAccel - RTL (Verilog) + C/C++/OpenCL | Developed using software defined acceleration, this example demonstrates a quick method for developing new or migrating existing hardware designs (RTL) |
-| File Compression | [GZip](https://github.com/Xilinx/Applications/tree/master/GZip) | SDAccel - C/C++/OpenCL | Developed using software defined acceleration, this example demonstrates methods of using hardware acceleration to speed up GZIP compression on an FPGA |
-| WebP Image Compression | [WebP](https://github.com/Xilinx/Applications/tree/master/webp) | SDAccel - C/C++/OpenCL | Developed using software defined acceleration, this example demonstrates methods of using hardware acceleration to speed up WebP encoder application on an FPGA |
-
-
-# Getting Started
+⚠️ NOTE: The developer kit is supported for Linux operating systems only.
-### New to AWS?
-If you have never used AWS before, we recommend you start with [AWS getting started training](https://aws.amazon.com/getting-started/), and focus on the basics of the [AWS EC2](https://aws.amazon.com/ec2/) and [AWS S3](https://aws.amazon.com/s3/) services. Understanding the fundamentals of these services will make it easier to work with AWS FPGAs.
+## Development Flow
+After creating an FPGA design (also called CL - Custom logic), developers can create an Amazon FPGA Image (AFI) and easily deploy it to an F1 instance. AFIs are reusable, shareable and can be deployed in a scalable and secure way.
-AWS FPGA generation and EC2 F1 instances are supported in the us-east-1 (N. Virginia), us-west-2 (Oregon), eu-west-1 (Ireland) and us-gov-west-1 ([GovCloud US](https://aws.amazon.com/govcloud-us/)) [regions](https://aws.amazon.com/about-aws/global-infrastructure/).
+
+## Development Environments
-### New to AWS FPGAs and setting up a development environment?
-The developer kit is supported for Linux operating systems only. You have the choice to develop on AWS EC2 using the [FPGA developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) or on-premises. Within a linux environment, you can execute `git clone https://github.com/aws/aws-fpga.git` to download the latest release to your EC2 Instance or local server. Help on cloning from github is available [here](https://help.github.com/articles/which-remote-url-should-i-use/). When using a SSH connection, execute `git clone git@github.com:aws/aws-fpga.git`. [To get help with connecting to Github via SSH](https://help.github.com/articles/connecting-to-github-with-ssh/).
+| Development Environment | Description | Accelerator Language | Hardware Interface | Debug Options| Typical Developer |
+| --------|---------|-------|---------|-------|-------|
+| Software Defined Accelerator Development using [Vitis](Vitis/README.md)/[SDAccel](SDAccel/README.md)| Development experience leverages an optimized compiler to allow easy new accelerator development or migration of existing C/C++/openCL, Verilog/VHDL to AWS FPGA instances | C/C++/OpenCL, Verilog/VHDL (RTL) | OpenCL APIs and XRT | SW/HW Emulation, Simulation, GDB, Virtual JTAG (Chipscope) | SW or HW Developer with zero FPGA experience |
+| [Hardware Accelerator Development using Vivado](hdk/README.md) | Fully custom hardware development experience provides hardware developers with the tools required for developing AFIs for AWS FPGA instances | Verilog/VHDL | [XDMA Driver](sdk/linux_kernel_drivers/xdma/README.md), [peek/poke](sdk/userspace/README.md) | Simulation, Virtual JTAG | HW Developer with advanced FPGA experience |
+| [IP Integrator/High Level Design(HLx) using Vivado](hdk/docs/IPI_GUI_Vivado_Setup.md) | Graphical interface development experience for integrating IP and high level synthesis development | Verilog/VHDL/C | [XDMA Driver](sdk/linux_kernel_drivers/xdma/README.md), [peek/poke](sdk/userspace/README.md) | Simulation, Virtual JTAG | HW Developer with intermediate FPGA experience |
-Before you start your first AWS FPGA design, we recommend that you go through one of the step-by-step guides. The guides will walk through development steps for hello world examples. Based on the tables above, pick the development environment that best fits your needs and use the guide to get started:
- * For fastest way to get started on FPGA accelerator development, start with the software defined development environment. The guide starts with the [SW Hello World example](SDAccel/README.md).
- * Next use the same guide to develop using the C/C++/openCL/RTL based [80+ examples on github](./SDAccel/examples/xilinx_2017.4).
- * For custom hardware development (HDK) environment, start with the [HDK Hello World example](hdk/README.md).
- * Next use the same guide to develop using the [cl\_dram\_dma](hdk/cl/examples/cl_dram_dma).
+> For on-premise development, SDAccel/Vitis/Vivado must have the [correct license and use one of the supported tool versions](./docs/on_premise_licensing_help.md).
-### In-depth training and resources
-Once you have completed your hello world examples, we recommend diving deeper into a training workshop or application notes
- * Software-defined [re:Invent 2017 Workshop](https://github.com/awslabs/aws-fpga-app-notes/blob/master/reInvent17_Developer_Workshop/README.md) demonstrates a video encoder acceleration and how to debug and optimize your accelerator.
- * Custom hardware developers need to learn about how the hardware accelerator interfaces to the F1 Shell
- * [Shell Interface](hdk/docs/AWS_Shell_Interface_Specification.md)
- * [Shell Address Map](hdk/docs/AWS_Fpga_Pcie_Memory_Map.md)
- * [Programmer view of the FPGA](./hdk/docs/Programmer_View.md)
- * [Virtual JTAG](hdk/docs/Virtual_JTAG_XVC.md)
- * [Application for methods of interfacing the host application to the Hardware accelerator](https://github.com/awslabs/aws-fpga-app-notes)
+## FPGA Developer AMI
-
-# FPGA Developer AMI
+The [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) is available on the AWS marketplace without a software charge and includes tools needed for developing FPGA Designs to run on AWS F1.
-The [FPGA developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) is available on the AWS marketplace without a software charge and includes free tools and drivers needed for FPGA development on EC2 instances. FPGA development runs on several [EC2 instance types](https://aws.amazon.com/ec2/instance-types/). Given the large size of the FPGA used inside the AWS FPGA instances, the implementation tools require 32GiB Memory (ex: z1d.xlarge, z1d.2xlarge, c5.4xlarge, m5.2xlarge, r5.xlarge, t2.2xlarge). z1d.xlarge/c5.4xlarge and z1d.2xlarge/c5.8xlarge would provide the fastest execution time with 30GiB+ and 60GiB+ of memory respectively. Developers who want to save on cost, could start coding and run simulations on low-cost instances, like t2.2xlarge, and move to the aforementioned larger instances to run the synthesis of their acceleration code.
+Given the large size of the FPGA used inside AWS F1 Instances, Xilinx tools work best with 32GiB Memory.
+z1d.xlarge/c5.4xlarge and z1d.2xlarge/c5.8xlarge instance types would provide the fastest execution time with 30GiB+ and 60GiB+ of memory respectively.
+Developers who want to save on cost, could start coding and run simulations on low-cost instances, like t2.2xlarge, and move to the aforementioned larger instances to run the synthesis of their acceleration code.
-Currently, AWS marketplace includes multiple versions of the FPGA developer AMI, supporting Xilinx SDx 2017.4 and 2018.2 toolchain versions. The following compatibility table describes the mapping of currently supported developer kit versions to AMI versions:
+AWS marketplace offers multiple versions of the FPGA Developer AMI. The following compatibility table describes the mapping of currently supported developer kit versions to AMI versions:
-| Developer Kit Version | Tool Version Supported | Compatible FPGA developer AMI Version |
+| Developer Kit Version | Tool Version Supported | Compatible FPGA Developer AMI Version |
|-----------|-----------|------|
-| 1.3.7-1.3.X | 2017.4 | v1.4.0-v1.4.X (Xilinx Vivado/SDx 2017.4) |
-| 1.4.X | 2017.4 | v1.4.0-v1.4.X (Xilinx Vivado/SDx 2017.4) |
-| 1.4.3+ | 2018.2 | v1.5.0-v1.5.X (Xilinx Vivado/SDx 2018.2) |
+| 1.4.18+ | 2020.2 | v1.10.X (Xilinx Vivado/Vitis 2020.2) |
+| 1.4.16+ | 2020.1 | v1.9.0-v1.9.X (Xilinx Vivado/Vitis 2020.1) |
+| 1.4.13+ | 2019.2 | v1.8.0-v1.8.X (Xilinx Vivado/Vitis 2019.2) |
+| 1.4.11+ | 2019.1 | v1.7.0-v1.7.X (Xilinx Vivado/SDx 2019.1) |
+| 1.4.8 - 1.4.15a | 2018.3 | v1.6.0-v1.6.X (Xilinx Vivado/SDx 2018.3) |
+| 1.4.3 - 1.4.15a | 2018.2 | v1.5.0-v1.5.X (Xilinx Vivado/SDx 2018.2) |
+| 1.3.7 - 1.4.15a | 2017.4 | v1.4.0-v1.4.X (Xilinx Vivado/SDx 2017.4) |
+
+⚠️ Developer kit release v1.4.16 will remove support for Xilinx 2017.4, 2018.2, 2018.3 toolsets.
+While developer kit release v1.4.16 onwards will not support older Xilinx tools, you can still use those tools with HDK releases v1.4.15a or earlier.
+Please check out [the latest v1.4.15a release tag from GitHub](https://github.com/aws/aws-fpga/releases/tag/v1.4.15a) to use Xilinx 2017.4, 2018.2, 2018.3 toolsets.
-Developer kit versions prior to v1.3.7 and Developer AMI prior to v1.4 (2017.1) reached end-of-life. See [AWS forum announcement](https://forums.aws.amazon.com/ann.jspa?annID=6068) for additional details.
+⚠️ Developer kit versions prior to v1.3.7 and Developer AMI prior to v1.4 (2017.1) reached end-of-life. See [AWS forum announcement](https://forums.aws.amazon.com/ann.jspa?annID=6068) for additional details.
- If developing using SDAccel environment please refer to this [Runtime Compatibility Table](SDAccel/docs/Create_Runtime_AMI.md#runtime-ami-compatability-table)
+For software-defined development please look at the runtime compatibility table based on the Xilinx toolset in use:
+[SDAccel](SDAccel/docs/Create_Runtime_AMI.md#runtime-ami-compatibility-table) or [Vitis](Vitis/docs/Create_Runtime_AMI.md#runtime-ami-compatibility-table)
-
-# Hardware Development Kit (HDK)
+## Hardware Development Kit (HDK)
-The [HDK directory](./hdk/README.md) contains useful information, examples, and scripts for developers wanting to start building Amazon FPGA Images (AFI). It includes the development environment, simulation, build and AFI creation scripts. The HDK can be installed on any on-premises server or an EC2 instance. The developer kit is not required if you plan to use a pre-built AFI shared from another developer.
+The [HDK directory](./hdk/README.md) contains documentation, examples, simulation, build and AFI creation scripts to start building Amazon FPGA Images (AFI).
+The HDK can be installed on any on-premises server or an EC2 instance.
+The developer kit is not required if you plan to use a pre-built AFI shared from another developer.
-
-# Software-defined Development Environment
+## Software-defined Development Environment
-The software-defined development environment allows customers to compile their C/C++/OpenCL code into the FPGA as kernels, and use OpenCL APIs to pass data to the FPGA. Software developers with no FPGA experience will find a familiar development experience that supercharges cloud applications.
+The software-defined development environment allows customers to compile their C/C++/OpenCL code into the FPGA as kernels, and use OpenCL APIs to pass data to the FPGA.
+Software developers with no FPGA experience will find a familiar development experience that supercharges cloud applications.
-In addition, this development environment (also called SDAccel) allows the mix of C/C++ and RTL accelerator designs into a C/C++ software based development environment. This method enables faster prototyping using C/C++ while supporting manual optimization of critical blocks within RTL. This approach is similar to optimizing time critical functions using software compiler optimization methods.
+In addition, this development environment allows for a mix of C/C++ and RTL accelerator designs into a C/C++ software based development environment. This method enables faster prototyping using C/C++ while supporting manual optimization of critical blocks within RTL. This approach is similar to optimizing time critical functions using software compiler optimization methods.
-This developer kit has 80+ examples to help you get started on FPGA acceleration. To get started, review the [Software-defined development environment readme](SDAccel/README.md).
+To get started with Xilinx SDAccel, review the [Software-defined development environment readme](SDAccel/README.md).
+To get started with Xilinx Vitis, review the [Vitis unified development environment readme](Vitis/README.md).
-
-# Runtime Tools (SDK)
+## Runtime Tools (SDK)
The [SDK directory](./sdk/README.md) includes the runtime environment required to run on EC2 FPGA instances. It includes the drivers and tools to manage the AFIs that are loaded on the FPGA instance. The SDK isn't required during the AFI development process; it is only required once an AFI is loaded onto an EC2 FPGA instance. The following sdk resources are provided:
* Linux Kernel Drivers - The developer kit includes three drivers:
* [XDMA Driver](sdk/linux_kernel_drivers/xdma/README.md) - DMA interface to/from HDK accelerators.
- * [XOCL Driver](sdk/linux_kernel_drivers/xocl) - DMA interface with software defined accelerators (also called hardware kernels).
* [FPGA Libraries](sdk/userspace/fpga_libs) - APIs used by C/C++ host applications.
* [FPGA Management Tools](sdk/userspace/fpga_mgmt_tools/README.md) - AFI management APIs for runtime loading/clearing FPGA image, gathering metrics and debug interface on the F1 instance.
-
-# Developer Support
+# Amazon EC2 F1 Platform Features
+* 1-8 Xilinx UltraScale+ VU9P based FPGA slots
+* Per FPGA Slot, Interfaces available for Custom Logic(CL):
+ * One x16 PCIe Gen 3 Interface
+ * Four DDR4 RDIMM interfaces (with ECC)
+ * AXI4 protocol support on all interfaces
+* User-defined clock frequency driving all CL to Shell interfaces
+* Multiple free running auxiliary clocks
+* PCI-E endpoint presentation to Custom Logic(CL)
+ * Management PF (physical function)
+ * Application PF
+* Virtual JTAG, Virtual LED, Virtual DIP Switches
+* PCI-E interface between Shell(SH) and Custom Logic(CL).
+ * SH to CL inbound 512-bit AXI4 interface
+ * CL to SH outbound 512-bit AXI4 interface
+ * Multiple 32-bit AXI-Lite buses for register access, mapped to different PCIe BARs
+ * Maximum payload size set by the Shell
+ * Maximum read request size set by the Shell
+ * AXI4 error handling
+* DDR interface between SH and CL
+ * CL to SH 512-bit AXI4 interface
+ * 1 DDR controller implemented in the SH (always available)
+ * 3 DDR controllers implemented in the CL (configurable number of implemented controllers allowed)
+
+# Getting Started
+
+### Getting familiar with AWS
+If you have never used AWS before, we recommend you start with [AWS getting started training](https://aws.amazon.com/getting-started/), and focus on the basics of the [AWS EC2](https://aws.amazon.com/ec2/) and [AWS S3](https://aws.amazon.com/s3/) services.
+Understanding the fundamentals of these services will make it easier to work with AWS F1 and the FPGA Developer Kit.
+
+FPGA Image generation and EC2 F1 instances are supported in the us-east-1 (N. Virginia), us-west-2 (Oregon), eu-west-1 (Ireland) and us-gov-west-1 ([GovCloud US](https://aws.amazon.com/govcloud-us/)) [regions](https://aws.amazon.com/about-aws/global-infrastructure/).
-The [**Amazon FPGA Development User Forum**](https://forums.aws.amazon.com/forum.jspa?forumID=243&start=0) is the first place to go to post questions, learn from other users and read announcements from the EC2 FPGA team.
+> ⚠️ NOTE: By default, your AWS Account will have an EC2 F1 Instance launch limit of 0.
+> Before using F1 instances, you will have to open a [Support Case](https://console.aws.amazon.com/support/home#/case/create) to increase the EC2 Instance limits to allow launching F1 instances.
-* Click the "Watch" button in GitHub upper right corner to get regular updates.
-* We recommend you will join the [AWS forum](https://forums.aws.amazon.com/forum.jspa?forumID=243) to engage with the FPGA developer community and get help when needed (both AWS and Xilinx engineers monitor this forum).
-* In case you can't see "Your Stuff" details, you will need to logout using the logout button on the forums page and log back in again.
+### Setting up development environment for the first time
+
+You have the choice to develop on AWS EC2 using the [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) or on-premise.
+
+> ℹ️ INFO: We suggest starting with the FPGA Developer AMI with [build instances](#fpga-developer-ami) on EC2 as it has Xilinx tools and licenses setup for you to be able to quickly get into development.
+
+> ℹ️ INFO: For on-premise development, you will need to have [Xilinx tools and licenses available for you to use](./docs/on_premise_licensing_help.md)
+
+1. Start a Build Instance first to start your development.
+    > 💡 TIP: This instance does not have to be an F1 instance. You only require an F1 instance to run your AFIs (Amazon FPGA Images) once you have gone through your design build and AFI creation steps.
+
+    > ℹ️ INFO: If you need to follow GUI Development flows, please check out our [Developer Resources](./developer_resources/README.md) where we provide Step-By-Step guides to setting up a GUI Desktop.
+1. Clone the [FPGA Developer Kit](https://github.com/aws/aws-fpga) on your instance.
+ ```git clone https://github.com/aws/aws-fpga.git```
+1. Follow the quickstarts from the next section.
+
+### Quickstarts
+Before you create your own AWS FPGA design, we recommend that you go through one of the step-by-step Quickstart guides:
+
+| Description | Quickstart | Next Steps |
+|----|----|----|
+| Software Defined Accelerator Development using Xilinx Vitis | [Vitis hello_world Quickstart](Vitis/README.md) | [60+ Vitis examples](./Vitis/examples/), [Vitis Library Examples](./docs/examples/example_list.md) |
+| Software Defined Accelerator Development using Xilinx SDAccel | [SDAccel hello_world Quickstart](SDAccel/README.md) | [60+ SDAccel examples](./SDAccel/examples/) |
+| Custom Hardware Development(HDK) | [HDK hello_world Quickstart](hdk/README.md) | [CL to Shell and DRAM connectivity example](./hdk/cl/examples/cl_dram_dma), [Virtual Ethernet Application](./sdk/apps/virtual-ethernet) using the [Streaming Data Engine](./hdk/cl/examples/cl_sde) |
+| IP Integrator/High Level Design(HLx) | [IPI hello_world Quickstart](hdk/cl/examples/cl_hello_world_hlx/README.md) | [IPI GUI Examples](hdk/docs/IPI_GUI_Examples.md) |
+
+ℹ️ INFO: For more in-depth applications and examples of using High level synthesis, Vitis Libraries, App Notes and Workshops, please refer to our [Example List](./docs/examples/example_list.md)
+
+### How Tos
+| How To | Description |
+|----|----|
+| [Migrate Alveo U200 designs to F1](./Vitis/docs/Alveo_to_AWS_F1_Migration.md) | This application note shows the ease of migrating an Alveo U200 design to F1. |
-
# Documentation Overview
-The documentation is located throughout this developer kit, therefore, to help developers find information quicker the table below consolidates a list of key documents:
+Documentation is located throughout this developer kit and the table below consolidates a list of key documents to help developers find information:
| Topic | Document Name | Description |
|-----------|-----------|------|
-| Developer Kit Features | [RELEASE\_NOTES](./RELEASE_NOTES.md), [Errata](./ERRATA.md) | Release notes and Errata for all developer kit features, excluding the shell |
-| Frequently asked questions | [FAQ](./FAQs.md), [Errata](./ERRATA.md) | Q/A are added based on developer feedback and common AWS forum questions |
-| F1 Shell (HDK) | [AWS\_Shell\_RELEASE\_NOTES](./hdk/docs/AWS_Shell_RELEASE_NOTES.md), [AWS\_Shell\_ERRATA](./hdk/docs/AWS_Shell_ERRATA.md) | Release notes and Errata for F1 shell |
-| F1 Shell (HDK) | [AWS\_Shell\_Interface\_Specification](hdk/docs/AWS_Shell_Interface_Specification.md) | Shell-CL interface specification for HDK developers building AFI |
-| AWS setup | [Setup\_AWS\_CLI\_and\_S3\_Bucket](SDAccel/docs/Setup_AWS_CLI_and_S3_Bucket.md) | Setup instructions for preparing for AFI creation |
-| SDx graphical interface (SDAccel) | [README\_GUI](SDAccel/docs/README_GUI.md) | Instructions using the SDx GUI for software defined acceleration development and debug |
-| Software defined acceleration using RTL (SDAccel) | [Debug\_RTL\_Kernel](SDAccel/docs/Debug_RTL_Kernel.md) | Instructions on debugging RTL Kernel |
-| Software defined acceleration Run time (SDAccel) | [Create\_Runtime\_AMI](SDAccel/docs/Create_Runtime_AMI.md) | Instructions on creating a runtime AMI |
-| Host Application (HDK) | [Programmer\_View](hdk/docs/Programmer_View.md) | Host application to CL interface specification |
-| CL Debug (HDK) | [Virtual\_JTAG\_XVC](hdk/docs/Virtual_JTAG_XVC.md) | Debugging CL using Virtual JTAG (Chipscope) |
-| CL/Shell Simulation (HDK) | [RTL\_Simulating\_CL\_Designs](hdk/docs/RTL_Simulating_CL_Designs.md) | Shell-CL simulation specification |
-| Driver (HDK) | [README](sdk/linux_kernel_drivers/xdma/README.md) | Describes the DMA driver (XDMA) used by HDK examples and includes a link to an installation guide |
-| Shell Timeout and AXI Protocol Protection | [HOWTO\_detect\_shell\_timeout](hdk/docs/HOWTO_detect_shell_timeout.md) | The shell will terminate transactions after a time period or on an illegal transaction. This describes how to detect and gather data to help debug CL issues caused by timeouts. |
-| AFI Power | [afi\_power](hdk/docs/afi_power.md) | Helps developers with understanding AFI power and preventing power violations on the F1 instance |
-| AFI Management | [README](sdk/userspace/fpga_mgmt_tools/README.md) | CLI documentation for managing AFI on the F1 instance |
-| AFI Administration | [copy\_fpga\_image](hdk/docs/copy_fpga_image.md), [delete\_fpga\_image](hdk/docs/delete_fpga_image.md), [describe\_fpga\_images](hdk/docs/describe_fpga_images.md), [fpga\_image\_attributes](hdk/docs/fpga_image_attributes.md) | CLI documentation for administering AFIs |
-| AFI Creation Error Codes | [create\_fpga\_image\_error\_codes](hdk/docs/create_fpga_image_error_codes.md) | CLI documentation for managing AFIs |
-| Developing on-premises | [HDK: on\_premise\_licensing\_help](hdk/docs/on_premise_licensing_help.md), [SDAccel: On\_Premises\_Development\_Steps](SDAccel/docs/On_Premises_Development_Steps.md) | Guidance for developer wanting to develop AFIs from on-premises instead of using the [FPGA developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) running on AWS EC2 |
-
-
-
-# Github tips and tricks
- * [Cloning the repository](https://help.github.com/articles/cloning-a-repository/)
- * [Forking the repository](https://help.github.com/articles/fork-a-repo/)
- * [Searching code](https://help.github.com/articles/searching-code/) and [advanced search syntax](https://help.github.com/articles/understanding-the-search-syntax/)
- * [Finding files](https://help.github.com/articles/finding-files-on-github/)
- * Simply replace github.com with gitprint.com to generate a printable PDF
+| AWS setup | [Setup AWS CLI and S3 Bucket](./SDAccel/docs/Setup_AWS_CLI_and_S3_Bucket.md) | Setup instructions for preparing for AFI creation |
+| Developer Kit | [RELEASE NOTES](./RELEASE_NOTES.md), [Errata](./ERRATA.md) | Release notes and Errata for all developer kit features, excluding the shell |
+| Developer Kit | [Errata](./ERRATA.md) | Errata for all developer kit features, excluding the shell |
+| F1 Shell | [AWS Shell RELEASE NOTES](./hdk/docs/AWS_Shell_RELEASE_NOTES.md) | Release notes for F1 shell |
+| F1 Shell | [AWS Shell ERRATA](./hdk/docs/AWS_Shell_ERRATA.md) | Errata for F1 shell |
+| F1 Shell | [AWS Shell Interface Specification](./hdk/docs/AWS_Shell_Interface_Specification.md) | Shell-CL interface specification for HDK developers building AFI |
+| F1 Shell - Timeout and AXI Protocol Protection | [How to detect a shell timeout](hdk/docs/HOWTO_detect_shell_timeout.md) | The shell will terminate transactions after a time period or on an illegal transaction. This describes how to detect and gather data to help debug CL issues caused by timeouts. |
+| Vitis | [Debug Vitis Kernel](./Vitis/docs/Debug_Vitis_Kernel.md) | Instructions on debugging Vitis Kernel |
+| Vitis | [Create Runtime AMI](./Vitis/docs/Create_Runtime_AMI.md) | Instructions on creating a runtime AMI when using Xilinx Vitis|
+| Vitis | [XRT Instructions](./Vitis/docs/XRT_installation_instructions.md) | Instructions on building, installing XRT with MPD daemon considerations for F1 |
+| SDAccel | [Debug RTL Kernel](./SDAccel/docs/Debug_RTL_Kernel.md) | Instructions on debugging RTL Kernel with SDAccel |
+| SDAccel | [Create Runtime AMI](./SDAccel/docs/Create_Runtime_AMI.md) | Instructions on creating a runtime AMI when using Xilinx SDAccel|
+| HDK - Host Application | [Programmer View](./hdk/docs/Programmer_View.md) | Host application to CL interface specification |
+| HDK - CL Debug | [Debug using Virtual JTAG](./hdk/docs/Virtual_JTAG_XVC.md) | Debugging CL using Virtual JTAG (Chipscope) |
+| HDK - Simulation | [Simulating CL Designs](./hdk/docs/RTL_Simulating_CL_Designs.md) | Shell-CL simulation specification |
+| HDK - Driver | [README](./sdk/linux_kernel_drivers/xdma/README.md) | Describes the DMA driver (XDMA) used by HDK examples and includes a link to an installation guide |
+| AFI | [AFI Management SDK](./sdk/userspace/fpga_mgmt_tools/README.md) | CLI documentation for managing AFI on the F1 instance |
+| AFI - EC2 CLI | [copy\_fpga\_image](./hdk/docs/copy_fpga_image.md), [delete\_fpga\_image](./hdk/docs/delete_fpga_image.md), [describe\_fpga\_images](./hdk/docs/describe_fpga_images.md), [fpga\_image\_attributes](./hdk/docs/fpga_image_attributes.md) | CLI documentation for administering AFIs |
+| AFI - Creation Error Codes | [create\_fpga\_image\_error\_codes](hdk/docs/create_fpga_image_error_codes.md) | CLI documentation for managing AFIs |
+| AFI - Power | [FPGA Power, recovering from clock gating](./hdk/docs/afi_power.md) | Helps developers with understanding FPGA power usage, preventing power violations on the F1 instance and recovering from a clock gated slot. |
+| On-premise Development | [Tools, Licenses required for on-premise development](./docs/on_premise_licensing_help.md) | Guidance for developer wanting to develop AFIs from on-premises instead of using the [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) |
+| Frequently asked questions | [FAQ](./FAQs.md)| Q/A are added based on developer feedback and common AWS forum questions |
+# Developer Support
+* The [**Amazon FPGA Development User Forum**](https://forums.aws.amazon.com/forum.jspa?forumID=243&start=0) is the first place to go to post questions, learn from other users and read announcements.
+ * We recommend joining the [AWS forums](https://forums.aws.amazon.com/forum.jspa?forumID=243) to engage with the FPGA developer community, AWS and Xilinx engineers to get help.
+* You could also file a [Github Issue](https://github.com/aws/aws-fpga/issues) for support. We prefer the forums as this helps the entire community learn from issues, feedback and answers.
+ * Click the "Watch" button in GitHub upper right corner to get regular updates.
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 86f06856..38d40175 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,33 +1,118 @@
-
# AWS EC2 FPGA HDK+SDK Release Notes
+## Release 1.4.18 (See [ERRATA](./ERRATA.md) for unsupported features)
+* FPGA developer kit now supports Xilinx Vivado/Vitis 2020.2
+
+## Release 1.4.17 (See [ERRATA](./ERRATA.md) for unsupported features)
+* Updated XDMA Driver to allow builds on newer kernels
+* Updated documentation on Alveo U200 to F1 platform porting
+* Added Vitis 2019.2 Patching for AR#73068
+
+## Release 1.4.16 (See [ERRATA](./ERRATA.md) for unsupported features)
+* FPGA developer kit now supports Xilinx Vivado/Vitis 2020.1
+ * To upgrade, use [Developer AMI v1.9.0](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) on the AWS Marketplace.
+* Updated Vitis examples to include usage of Vitis Libraries.
+* Added documentation and examples to show Xilinx Alveo design migration to F1.
+
+## Release 1.4.15a (See [ERRATA](./ERRATA.md) for unsupported features)
+* Fixed Xilinx AR#73068 patching
+ * DDR4 IP needs to be regenerated for the patch to take effect.
+* Updated cl_dram_dma public AFI.
+
+## Release 1.4.15 (See [ERRATA](./ERRATA.md) for unsupported features)
+* Added Xilinx AR#73068 patching
+* Added DMA range error to the interrupt status register metrics
+* Enhanced DDR model rebuild qualifiers in hdk_setup.sh
+* Updated Virtual JTAG Documentation
+
+## Release 1.4.14 (See [ERRATA](./ERRATA.md) for unsupported features)
+* Updated Vitis Platform file to fix a DDR bandwidth issue
+* Added Vitis Debug Documentation
+
+## Release 1.4.13 (See [ERRATA](./ERRATA.md) for unsupported features)
+* FPGA developer kit now supports Xilinx Vivado/Vitis 2019.2
+* To upgrade, use [Developer AMI v1.8.0](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) on the AWS Marketplace.
+
+## Release 1.4.12 (See [ERRATA](./ERRATA.md) for unsupported features)
+* Added supported versions for BJS AMIs
+* Added link to the re:Invent 19 F1 workshop
+* Fixed missing extern C declaration by PR #473
+* Documentation Path fixes from PR #466, #468 and #470
+
+## Release 1.4.11 (See [ERRATA](./ERRATA.md) for unsupported features)
+* FPGA developer kit now supports Xilinx SDx/Vivado 2019.1
+ * We recommend developers upgrade to v1.4.11 to benefit from the new features, bug fixes, and optimizations.
+ * To upgrade, use [Developer AMI v1.7.0](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) on the AWS Marketplace. The Developer Kit scripts (hdk_setup.sh or sdaccel_setup.sh) will detect the tool version and update the environment based on requirements needed for Xilinx 2019.1 tools.
+* New functionality:
+ * Added a [developer resources section](./developer_resources/README.md) that provides guides on how to setup your own GUI Desktop and compute cluster environment.
+ * Developers can now ask for AFI limit increases via the [AWS Support Center Console](https://console.aws.amazon.com/support/cases#/create).
+ * Create a case to increase your `EC2 FPGA` service limit from the console.
+ * HLx IPI flow updates
+ * HLx support for AXI Fast Memory mode.
+ * HLx support for 3rd party simulations.
+ * HLx support for changes in shell and AWS IP updates(e.g. sh_ddr).
+* Bug Fixes:
+ * Documentation fixes in the [Shell Interface Specification](./hdk/docs/AWS_Shell_Interface_Specification.md)
+ * Fixes for forum questions
+ * [Unable to compile aws_v1_0_vl_rfs.sv in Synopsys VCS](https://forums.aws.amazon.com/thread.jspa?threadID=308829&tstart=0)
+ * [Use fpga_mgmt init in HLx runtime](https://forums.aws.amazon.com/thread.jspa?messageID=912063)
+ * New XRT versions added to the [XRT Installation Instructions](./SDAccel/docs/XRT_installation_instructions.md) to fix segmentation faults when using xclbin instead of awsxclbin files.
+* Deprecations:
+ * Removed GUI Setup scripts from AMI v1.7.0 onwards. See the [developer resources section](./developer_resources/README.md) that provides guides on how to setup your own GUI Desktop and compute cluster environment.
+* Package versions used for validation
+
+ | Package | AMI 1.7.0 [2019.1] | AMI 1.6.0 [2018.3] |AMI 1.5.0 [2018.2] | AMI 1.4.0 [2017.4] |
+ |---------|---|------------------------|------------------------|-----------------------|
+ | OS | Centos 7.6 | Centos 7.6 | Centos 7.5, 7.6 | Centos 7.4 |
+ | kernel | 3.10.0-957.27.2.el7.x86_64 | 3.10.0-957.5.1.el7.x86_64 | 3.10.0-862.11.6.el7.x86_64, 3.10.0-957.1.3.el7.x86_64 | 3.10.0-693.21.1.el7.x86_64 |
+ | kernel-devel | 3.10.0-957.27.2.el7.x86_64 | 3.10.0-957.5.1.el7.x86_64 | 3.10.0-862.11.6.el7.x86_64, 3.10.0-957.1.3.el7.x86_64 | 3.10.0-693.21.1.el7.x86_64 |
+ | LIBSTDC++ | libstdc++-4.8.5-36.el7_6.2.x86_64 | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-16.el7_4.2.x86_64 |
+
+## Release 1.4.10 (See [ERRATA](./ERRATA.md) for unsupported features)
+* New functionality:
+ * SDK now sorts the slots in DBDF order. Any scripts or integration maintainers should note that the slot order will be different from previous versions and should make any updates accordingly.
+
+* Bug Fixes:
+ * Fixes a bug in the [Automatic Traffic Generator (ATG)](./hdk/cl/examples/cl_dram_dma/design/cl_tst.sv). In SYNC mode, the ATG did not wait for write response transaction before issuing read transactions.
+ * Released [Xilinx runtime(XRT) version 2018.3.3.2](https://github.com/Xilinx/XRT/releases/tag/2018.3.3.2) to fix the following error:
+ `symbol lookup error: /opt/xilinx/xrt/lib/libxrt_aws.so: undefined symbol: uuid_parse!`
+ * This release fixes a bug wherein concurrent AFI load requests on two or more slots resulted in a race condition which sometimes resulted in Error: `(20) pci-device-missing`
+  * This release fixes an issue with the coding style of logic which could infer a latch during synthesis in [sde_ps_acc module](./hdk/cl/examples/cl_sde/design/sde_ps_acc.sv) within the cl_sde example
+
+* Package versions used for validation
+
+ | Package | AMI 1.6.0 [2018.3] |AMI 1.5.0 [2018.2] | AMI 1.4.0 [2017.4] |
+ |---------|------------------------|------------------------|-----------------------|
+ | OS | Centos 7.6 | Centos 7.5, 7.6 | Centos 7.4 |
+ | kernel | 3.10.0-957.5.1.el7.x86_64 | 3.10.0-862.11.6.el7.x86_64, 3.10.0-957.1.3.el7.x86_64 | 3.10.0-693.21.1.el7.x86_64 |
+ | kernel-devel | 3.10.0-957.5.1.el7.x86_64 | 3.10.0-862.11.6.el7.x86_64, 3.10.0-957.1.3.el7.x86_64 | 3.10.0-693.21.1.el7.x86_64 |
+ | LIBSTDC++ | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-16.el7_4.2.x86_64 |
+
-## AWS EC2 F1 Platform Features:
- * 1-8 Xilinx UltraScale+ VU9P based FPGA slots
- * Per FPGA Slot, Interfaces available for Custom Logic(CL):
- * One x16 PCIe Gen 3 Interface
- * Four DDR4 RDIMM interfaces (with ECC)
- * AXI4 protocol support on all interfaces
- * User-defined clock frequency driving all CL to Shell interfaces
- * Multiple free running auxiliary clocks
- * PCI-E endpoint presentation to Custom Logic(CL)
- * Management PF (physical function)
- * Application PF
- * Virtual JTAG, Virtual LED, Virtual DIP Switches
- * PCI-E interface between Shell(SH) and Custom Logic(CL).
- * SH to CL inbound 512-bit AXI4 interface
- * CL to SH outbound 512-bit AXI4 interface
- * Multiple 32-bit AXI-Lite buses for register access, mapped to different PCIe BARs
- * Maximum payload size set by the Shell
- * Maximum read request size set by the Shell
- * AXI4 error handling
- * DDR interface between SH and CL
- * CL to SH 512-bit AXI4 interface
- * 1 DDR controller implemented in the SH (always available)
- * 3 DDR controllers implemented in the CL (configurable number of implemented controllers allowed)
+## Release 1.4.9 (See [ERRATA](./ERRATA.md) for unsupported features)
+ * New functionality:
+ * Improved AFI load times for pipelined accelerator designs. For more details please see [Amazon FPGA image (AFI) pre-fetch and caching features](./hdk/docs/load_times.md).
+ * Ease of Use features:
+ * [Improved SDK Error messaging](./sdk/userspace/fpga_libs/fpga_mgmt/fpga_mgmt.c)
+ * [Improved documentation](./hdk/docs/IPI_GUI_Vivado_Setup.md#switching-between-hdk-and-hlx-flows) to help with transition from [HLX to HDK command line flows](https://forums.aws.amazon.com/thread.jspa?threadID=302718&tstart=0) and vice versa
+ * Incorporates feedback from [aws-fpga Issue 458](https://github.com/aws/aws-fpga/issues/458) by making the ```init_ddr``` function, used in design simulations to initialize DDR, more generic by moving out ATG deselection logic to a new ```deselect_atg_hw``` task
+
+ * Bug Fixes:
+ * Fixed Shell simulation model (sh_bfm) issue on PCIM AXI read data channel back pressure which was described in HDK 1.4.8 Errata.
+ * Fixed HDK simulation example which [demonstrates DMA and PCIM traffic in parallel](./hdk/cl/examples/cl_dram_dma/verif/tests/test_dma_pcim_concurrent.sv).
+
+ * Package versions used for validation
+
+ | Package | AMI 1.6.0 [2018.3] |AMI 1.5.0 [2018.2] | AMI 1.4.0 [2017.4] |
+ |---------|------------------------|------------------------|-----------------------|
+ | OS | Centos 7.6 | Centos 7.5, 7.6 | Centos 7.4 |
+ | kernel | 3.10.0-957.5.1.el7.x86_64 | 3.10.0-862.11.6.el7.x86_64, 3.10.0-957.1.3.el7.x86_64 | 3.10.0-693.21.1.el7.x86_64 |
+ | kernel-devel | 3.10.0-957.5.1.el7.x86_64 | 3.10.0-862.11.6.el7.x86_64, 3.10.0-957.1.3.el7.x86_64 | 3.10.0-693.21.1.el7.x86_64 |
+ | LIBSTDC++ | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-16.el7_4.2.x86_64 |
+
## Release 1.4.8 (See [ERRATA](./ERRATA.md) for unsupported features)
- * FPGA developer kit supports Xilinx SDx/Vivado 2018.3
+ * FPGA developer kit supports Xilinx SDx/Vivado 2018.3
* We recommend developers upgrade to v1.4.8 to benefit from the new features, bug fixes, and optimizations. To upgrade, use [Developer AMI v1.6.0](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) on AWS Marketplace. The Developer Kit scripts (hdk_setup.sh or sdaccel_setup.sh) will detect the tool version and update the environment based on requirements needed for Xilinx 2018.3 tools.
* Ease of Use features:
* Support for importing results into SDx GUI - By importing results from a script-based flow into SDx IDE, developers can leverage the tools for debug/profiling while keeping flexibility of the script-based flow
@@ -58,9 +143,6 @@
| LIBSTDC++ | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-36.el7.x86_64 | libstdc++-4.8.5-16.el7_4.2.x86_64 |
-
-
-
## Release 1.4.7 (See [ERRATA](./ERRATA.md) for unsupported features)
* Adds [Xilinx Runtime (XRT)](https://github.com/Xilinx/XRT/releases/tag/2018.2_XDF.RC5) Support for Linux kernel 3.10.0-957.1.3.el7.x86_64 & Centos 7.6
@@ -107,7 +189,7 @@
## Release 1.4.5 (See [ERRATA](./ERRATA.md) for unsupported features)
-* [Documents SDAccel Runtime compatibility](SDAccel/docs/Create_Runtime_AMI.md#runtime-ami-compatability-table)
+* [Documents SDAccel Runtime compatibility](SDAccel/docs/Create_Runtime_AMI.md#runtime-ami-compatibility-table)
* [Enables SDK FPGA Mgmt tool access to Non-root users](sdk/README.md#using-fpga-as-non-root-user)
* Fixed issues
* [HLX simulation failure](https://forums.aws.amazon.com/thread.jspa?threadID=293313&tstart=0)
@@ -120,7 +202,7 @@
## Release 1.4.3 (See [ERRATA](./ERRATA.md) for unsupported features)
* [DRAM Data Retention](hdk/docs/data_retention.md) - With DRAM data retention, developers can simply load a new AFI and continue using the data that is persistently kept in the DRAM attached to the FPGA, eliminating unnecessary data movements and greatly improving the overall application performance.
* [Virtual Ethernet](./sdk/apps/virtual-ethernet/README.md) - Provides a low latency network interface for EC2 F1, that enables high performance hardware acceleration to ethernet based applications on AWS like firewalls, routers and advanced security virtual appliances. With Virtual Ethernet, developers are able to create F1 accelerators that process ethernet packets directly from user-space on the FPGA with high throughput and low-latency.
-* [Developer AMI v1.5](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) with Vivado/SDx 2018.2 tools - New FPGA developer AMI supporting Vivado 2018.2 for faster compile times, higher frequencies and improved timing closure
+* [Developer AMI v1.5](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) with Vivado/SDx 2018.2 tools - New FPGA Developer AMI supporting Vivado 2018.2 for faster compile times, higher frequencies and improved timing closure
## Release 1.4.2 (See [ERRATA](./ERRATA.md) for unsupported features)
* Fixed SDAccel XOCL driver compile fails that occur on linux kernels greater than 3.10.0-862.3.3.el7.x86_64
@@ -148,55 +230,6 @@
* Release 1.4.0 greatly improves the performance of the DMA (for interrupt driven DMA on the cl\_dram\_dma example design). This is accomplished through a combination of shell changes to relax DMA timeouts and a new XDMA software driver option. We have ported the relevant HDK examples to the XDMA driver in this release. EDMA is still supported, and developers can freely choose which DMA driver to use as part of their host application.
-## Supported Tools and Environment
-
-* The HDK and SDK are designed for **Linux** environment and has not been tested on other platforms
-* The First installation of AWS FPGA SDK requires having gcc installed on the instance. If it's not available, try `sudo yum update && sudo yum group install "Development Tools"`
-* The HDK build step requires having Xilinx's Vivado tool and Vivado License Management running. These are provided with AWS FPGA Developer AMI at no additional cost
-* This release is tested and validated with Xilinx 2017.4 SDx/Vivado
-* Developers that choose to develop on-premises need to have Xilinx license 'EF-VIVADO-SDX-VU9P-OP' installed. For more help, please refer to the [on-premises licensing help](./hdk/docs/on_premise_licensing_help.md)
-* The following simulators are supported with this HDK:
-**Vivado XSIM RTL simulator
-** Mentor Graphics' Questa RTL simulator (with a separate license from MentorGraphics)
-** Synopsys' VCS RTL simulator (with a separate license from Synopsys)
-
-## License Requirements
-
-The HDK and SDK in the FPGA development kit have different licenses. For more details please refer to the [HDK License](./hdk/LICENSE.txt) and the [SDK License](./sdk/LICENSE.txt).
-
-## FAQs
-
-**Q: How do I know which HDK version I have on my instance/machine? **
-
-Look for the ./hdk/hdk_version.txt file.
-
-**Q: How do I know what my Shell version is? **
-
-The Shell version of an FPGA slot is available through the FPGA Image Management tools after an AFI has been loaded. See the description of `fpga-describe-local-image` for more details on retrieving the shell version from a slot. Prior to loading an AFI, the state of the FPGA (including shell version) is undefined and non-deterministic.
-
-**Q: How do I know what version of FPGA Image management tools are running on my instance? **
-
-The FPGA Image management tools version is reported with any command executed from these tools. See the description of `fpga-describe-local-image` for more details.
-
-**Q: How do I update my existing design with this release?**
-
-1. Start by either cloning the entire GitHub structure for the HDK release or downloading new directories that have changed. AWS recommends an entire GitHub clone to ensure no files are missed
-2. Update the CL design to conform to the new AWS_Shell_Interface_Specification TODO: add link. TODO: need a doc to outline what changes are a MUST in this upgrade, and which ones are optional?
-3. Follow the process for AFI generation outlined in aws-fpga/hdk/cl/examples/readme.md
-4. Update FPGA Image Management Tools to the version included in aws-fpga/sdk/management
-TODO: SDaccel design have different steps?
-
-**Q: How do I get support?**
-
-The FPGA Development forum provides an easy access to Developer support. It's the first place to go to post questions, suggestions and receive important announcements from the AWS FPGA team. To gain access to the user forum, please go to https://forums.aws.amazon.com/index.jspa and login. To be notified of important messages you will need to click the “Watch Forum” button on the right side of the screen.
-
-**Q: How do I know which HDK GitHub release I am working with? **
-
-See the release notes at the top of the GitHub directory to identify the version of your GitHub clone.
-
-TODO: The following major features are included in this HDK release:
-
-
## Previous release notes
## Release 1.3.X Details (See [ERRATA](./ERRATA.md) for unsupported features)
@@ -224,7 +257,7 @@ The following major features are included in this HDK release:
* Restrictions on URAM have been updated to enable 100% of the URAM with a CL to be utilized. See documentation on enabling URAM utilization: [URAM_options](./hdk/docs/URAM_Options.md)
### 5. Vivado IP Integrator (IPI) and GUI Workflow
-* Vivado graphical design canvas and project-based flow is now supported. This flow allows developers to create CL logic as either RTL or complex subsystems based on an IP centric block diagram. Prior experience in RTL or system block designs is recommended. The [IP Integrator and GUI Vivado workflow](hdk/docs/IPI_GUI_Vivado_Setup.md) enables a unified graphical environment to guide the developer through the common steps to design, implement, and verify FGPAs. To get started, start with the [README that will take you through getting started steps and documents on IPI](hdk/docs/IPI_GUI_Vivado_Setup.md)
+* Vivado graphical design canvas and project-based flow is now supported. This flow allows developers to create CL logic as either RTL or complex subsystems based on an IP centric block diagram. Prior experience in RTL or system block designs is recommended. The [IP Integrator and GUI Vivado workflow](hdk/docs/IPI_GUI_Vivado_Setup.md) enables a unified graphical environment to guide the developer through the common steps to design, implement, and verify FPGAs. To get started, start with the [README that will take you through getting started steps and documents on IPI](hdk/docs/IPI_GUI_Vivado_Setup.md)
### 6. Build Flow improvments
* See [Build_Scripts](./hdk/common/shell_v04261818/build/scripts)
diff --git a/SDAccel/FAQ.md b/SDAccel/FAQ.md
index adc3d021..3f41a2d6 100644
--- a/SDAccel/FAQ.md
+++ b/SDAccel/FAQ.md
@@ -1,51 +1,59 @@
# Frequently Asked Questions (FAQ)
-## Q: When I run my application on F1, I see these errors: ERROR: Failed to load xclbin ERROR: No program executable for device ERROR: buffer (2) is not resident in device (0)", how to debug these errors?
-A: First double check that your AFI has been generated successfully by reviewing the SDAccel README. Second, check that you are running your application on F1 using sudo. Lastly, check that your AWS CLI (configure) was configured using output format as json.
+## Q: When I run my application on F1, I see these errors: ERROR: Failed to load xclbin ERROR: No program executable for device ERROR: buffer (2) is not resident in device (0)", how to debug these errors?
+A:
+* Check that your AFI has been generated successfully by reviewing the SDAccel README.
+* Check that you are running your application on F1 as super user (sudo).
+* Lastly, check that your AWS CLI (configure) was configured using output format as json.
## Q: During AFI generation (create_sdaccel_afi.sh), how do I resolve this error: "An error occurred (AuthFailure) when calling the CreateFpgaImage operation: AWS was not able to validate the provided access credentials"?
-A: For an AFI generation to complete all errors must be resolved. This error ("An error occurred (AuthFailure) when calling the CreateFpgaImage operation: AWS was not able to validate the provided access credentials") message means your AWS credentials were not setup properly or your IAM does not have access to the API (CreateFpgaImage). Here is some additional info on how to setup IAM privileges.
-http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ec2-api-permissions.html
+A:
+
+This error message means your AWS credentials or IAM role were not setup correctly to have access to the API (CreateFpgaImage).
+AWS Accounts require IAM permissions to access API functions. To test your IAM permissions use [DescribeFpgaImage API](https://github.com/aws/aws-fpga/blob/master/hdk/docs/describe_fpga_images.md)
+
+To setup IAM privileges please check the [EC2 API Permissions documentation](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ec2-api-permissions.html)
-AWS Accounts require IAM permisions to access API functions. To test your IAM permissions use DescribeFpgaImage API:
-https://github.com/aws/aws-fpga/blob/master/hdk/docs/describe_fpga_images.md
## Q: During AFI generation (create_sdaccel_afi.sh), my AFI failed to generate and I see this error message in the log: "Provided clocks configuration is illegal. See AWS FPGA HDK documentation for supported clocks configuration. Frequency 0 is lower than minimal supported frequency of 80", how do I debug this message?
-A: Please confirm that you successfully compiled your kernel for HW. For the quick start examples, you will need to have completed the quick start and successfully passed this command: make TARGETS=hw DEVICES=$AWS_PLATFORM all
+A:
+* Please confirm that you successfully compiled your kernel for HW.
+* For the quick start examples, you will need to have completed the quick start and successfully passed this command: `make TARGETS=hw DEVICES=$AWS_PLATFORM all`
-## Q: What is a xclbin or binary container on SDAccel?
+## Q: What is a xclbin or binary container on SDAccel? What is an awsxclbin?
A: The [xclbin](https://www.xilinx.com/html_docs/xilinx2017_2/sdaccel_doc/topics/design-flows/concept-create-compute-unit-binary.html) file or the "Binary Container" is a binary library of kernel compute units that will be loaded together into an OpenCL context for a specific device.
-AWS uses a modified version of the xclbin called awsxclbin. The awsxclbin contains the xclbin metadata and AFI ID.
+AWS uses a modified version of the xclbin called awsxclbin. The awsxclbin contains the xclbin metadata and AFI ID.
## Q: What can we investigate when xocc fails with a path not meeting timing?
A: An example is WARNING: [XOCC 60-732] Link warning: One or more timing paths failed timing targeting MHz for . The frequency is being automatically changed to MHz to enable proper functionality.
1. Generally speaking, lowering the clock will make the design functionally operational in terms of operations (since there will not be timing failures) but the design might not operate at the performance needed due this clock frequency change. We can review what can be done.
-1. If CLOCK_NAME is `kernel clock 'DATA_CLK'` then this is the clock that drives the kernels. Try reducing the kernel clock frequency see --kernel_frequency option to xocc in [latest SDAccel Environment User Guide]
+1. If CLOCK_NAME is `kernel clock 'DATA_CLK'` then this is the clock that drives the kernels. Try reducing the kernel clock frequency see --kernel_frequency option to xocc in the [latest SDAccel Environment User Guide](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2019_1/ug1023-sdaccel-user-guide.pdf).
1. If CLOCK_NAME is `system clock 'clk_main_a0'` then this is the clock clk_main_a0 which drives the AXI interconnect between the AWS Shell and the rest of the platform (SDAccel peripherals and user kernels). Using --kernel_frequency as above does not have any direct effect but might have side effect in changing the topology/placement of the design and improve this issue.
1. If OCL/C/C++ kernels were also used, investigate VHLS reports / correlate with kernel source code to see if there are functions with large number of statements in basic block, examples: might have unrolled loops with large loop-count, might have a 100++ latency; the VHLS runs and log files are located in the directory named `_xocc*compile*`
1. Try `xocc -O3` to run bitstream creation process with higher efforts.
-1. Open a Vivado implementation project using ```vivado `find -name ipiimpl.xpr` ``` to analyze the design; needs Vivado knowledge; see [UltraFast Design Methodology Guide for the Vivado][latest UG949]
+1. Open a Vivado implementation project using ```vivado `find -name ipiimpl.xpr` ``` to analyze the design; needs Vivado knowledge; see [UltraFast Design Methodology Guide for the Vivado](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2019_1/ug949-vivado-design-methodology.pdf)
## Q: xocc issues message WARNING: [XOCC 204-69] Unable to schedule ...due to limited memory ports.
A: This may lower the performance of the implementation.
-Details on this are provided in [Debug HLS Performance: Limited memory ports]
+Details on this are provided in [the SDAccel HLS Debug document](docs/SDAccel_HLS_Debug.md)
## Q: xocc fails due to routing/resource overflow
-A: Examine utilization reports. If OCL/C/C++ kernels were also used, look into the source code for excessive unroll happening.
+A: Examine utilization reports. If OCL/C/C++ kernels were also used, look into the source code for excessive unroll happening.
## Q: How do I open the design as a Vivado project (.xpr)?
A: There are 2 Vivado project files:
1. CL Design - from command line: ```vivado `find -name ipiprj.xpr\` ``` to see the connectivity of the created design
-1. Implementation project - from command line: ```vivado `find -name ipiimpl.xpr\` ``` to analyze the design in the place and routing design phases. For an additional Vivado Design reference, see [UltraFast Design Methodology Guide for the Vivado][latest UG949]
+1. Implementation project - from command line: ```vivado `find -name ipiimpl.xpr\` ``` to analyze the design in the place and routing design phases.
+ 1. For an additional Vivado Design reference, see the [UltraFast Design Methodology Guide for the Vivado](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2019_1/ug949-vivado-design-methodology.pdf)
## Q: What should I do if FPGA instance execution gets the wrong results or gets stuck?
A:
1. Verify hw_emu works as expected
-1. See "Chapter 4 - Debugging Applications in the SDAccel Environment" in [latest SDAccel Environment User Guide]
+1. See the "Debugging Applications in the SDAccel Environment" chapter in the [latest SDAccel Environment User Guide](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2019_1/ug1023-sdaccel-user-guide.pdf).
## Q: Bitstream creation fails to create design less that 60 MHz?
A: SDAccel flow does not allow clocks running less than 60 MHz kernel clock, therefore, you will need to debug further using [HLS Debug suggestions](./docs/SDAccel_HLS_Debug.md)
@@ -63,19 +71,21 @@ A: Please make sure you executed the following commands before launching the SDx
## Q: How do I debug error: `No current synthesis run set`?
A: You may have run the previous [HDK IPI examples](../hdk/docs/IPI_GUI_Vivado_Setup.md) and created a `Vivado_init.tcl` file in `~/.Xilinx/Vivado`. It is recommended to remove it before switching from hardware development flow to SDAccel.
-# Additional Resources
+## Q: I am getting an error: `symbol lookup error: /opt/xilinx/xrt/lib/libxrt_aws.so: undefined symbol: uuid_parse` What should I do?
+A: This error occurred because the XRT RPM was built without linking in a library needed for the uuid symbols.
+ To fix it, use the latest XRT RPM's documented in the [XRT installation document](docs/XRT_installation_instructions.md)
-The [AWS SDAccel README].
+## Q: What is the lowest frequency SDAccel design supported on the AWS F1 Platform?
+A: We support creating AFIs from CLs that have been built to work at frequencies no lower than 80 MHz.
+ Re-clocking/Loading a dynamic clock frequency lower than 80MHz will also result in an error.
-Xilinx web portal for [Xilinx SDAccel documentation] and for [Xilinx SDAccel GitHub repository]
+# Additional Resources
-Links pointing to **latest** version of the user guides
- * [UG1023: SDAccel Environment User Guide][latest SDAccel Environment User Guide]
- * [UG1021: SDAccel Environment Tutorial: Getting Started Guide (including emulation/build/running on H/W flow)][latest UG1021]
- * [UG1207: SDAccel Environment Optimization Guide][latest SDAccel Environment Optimization Guide]
- * [UG949: UltraFast Design Methodology Guide for the Vivado Design Suite][latest UG949]
+* The [AWS SDAccel README](README.md).
+* Xilinx web portal for [Xilinx SDAccel documentation](https://www.xilinx.com/products/design-tools/software-zone/sdaccel.html?resultsTablePreSelect=xlnxdocumenttypes:SeeAll#documentation)
+* [Xilinx SDAccel GitHub repository](https://github.com/Xilinx/SDAccel_Examples)
-Links pointing to **2017.4** version of the user guides
+* Links pointing to **2017.4** version of the user guides
* [UG1023: SDAccel Environment User Guide][UG1023 2017.4]
* [UG1021: SDAccel Environment Tutorial: Getting Started Guide (including emulation/build/running on H/W flow)][UG1021 2017.4]
* [UG1207: SDAccel Environment Optimization Guide][UG1207 2017.4]
diff --git a/SDAccel/Makefile b/SDAccel/Makefile
index 436c6882..284cbe04 100644
--- a/SDAccel/Makefile
+++ b/SDAccel/Makefile
@@ -40,9 +40,9 @@ $(info OS is $(OS))
MODULE :=
ifeq ($(RELEASE_VER),2017.4)
DSA := $(DSA)
- SRC_DIR = src2
+ SRC_DIR = src
XRT_HAL_LIB = libxrt-aws.so
- EXE = awssak2
+ EXE = awssak
MODULE = xocl
ifeq ($(OS),Ubuntu)
GLIBCPP_PATH = lib/lnx64.o/Ubuntu
diff --git a/SDAccel/README.md b/SDAccel/README.md
index 2bfdfa23..da59da6e 100644
--- a/SDAccel/README.md
+++ b/SDAccel/README.md
@@ -43,7 +43,7 @@ It is highly recommended you read the documentation and utilize software and har
* Launch an instance using the [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) which comes pre-installed with SDAccel and required licenses.
* You may use this F1 instance to [build your host application and Xilinx FPGA binary](#createapp), however, it is more cost efficient to either:
* Launch the [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) on a compute EC2 instance, with a minimum of 30GiB RAM), **OR**
- * Follow the [On-Premises Instructions](../hdk/docs/on_premise_licensing_help.md) to purchase and install a license from Xilinx.
+ * Follow the [On-Premises Instructions](../docs/on_premise_licensing_help.md) to purchase and install a license from Xilinx.
* Setup AWS IAM permissions for creating FPGA Images (CreateFpgaImage and DescribeFpgaImages). [EC2 API Permissions are described in more detail](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ec2-api-permissions.html). It is highly recommended that you validate your AWS IAM permissions prior to proceeding with this quick start. By calling the [DescribeFpgaImages API](../hdk/docs/describe_fpga_images.md) you can check that your IAM permissions are correct.
* [Setup AWS CLI and S3 Bucket](docs/Setup_AWS_CLI_and_S3_Bucket.md) to enable AFI creation.
* Install optional [packages](packages.txt) required to run all examples. If you do not install these packages, some examples may not work properly. The setup scripts will warn you of any missing packages.
@@ -63,9 +63,7 @@ It is highly recommended you read the documentation and utilize software and har
$ cd $AWS_FPGA_REPO_DIR
$ source sdaccel_setup.sh
```
- * This section describes the valid platforms for shell_v04261818
- * Xilinx Tool 2017.4 Platform:
- * AWS_PLATFORM_DYNAMIC_5_0 - (Default) AWS F1 platform dynamically optimized for multi DDR use cases.
+ * Valid platforms for shell_v04261818: `AWS_PLATFORM_DYNAMIC_5_0` (Default) AWS F1 platform dynamically optimized for multi DDR use cases.
* Changing to a different platform can be accomplished by setting AWS_PLATFORM environment variable. Only one platform is supported for this example:
```
@@ -91,7 +89,7 @@ For CPU-based (SW) emulation, both the host code and the FPGA binary code are co
The instructions below describe how to run the SDAccel SW Emulation flow using the Makefile provided with a simple "hello world" example
```
- $ cd $SDACCEL_DIR/examples/xilinx/getting_started/host/helloworld_ocl/
+ $ cd $SDACCEL_DIR/examples/xilinx/getting_started/hello_world/helloworld_ocl/
$ make clean
$ make check TARGETS=sw_emu DEVICES=$AWS_PLATFORM all
```
@@ -106,7 +104,7 @@ The SDAccel hardware emulation flow enables the developer to check the correctne
The instructions below describe how to run the HW Emulation flow using the Makefile provided with a simple "hello world" example:
```
- $ cd $SDACCEL_DIR/examples/xilinx/getting_started/host/helloworld_ocl/
+ $ cd $SDACCEL_DIR/examples/xilinx/getting_started/hello_world/helloworld_ocl/
$ make clean
$ make check TARGETS=hw_emu DEVICES=$AWS_PLATFORM all
```
@@ -120,7 +118,7 @@ The SDAccel system build flow enables the developer to build their host applicat
The instructions below describe how to build the Xilinx FPGA Binary and host application using the Makefile provided with a simple "hello world" example:
```
- $ cd $SDACCEL_DIR/examples/xilinx/getting_started/host/helloworld_ocl/
+ $ cd $SDACCEL_DIR/examples/xilinx/getting_started/hello_world/helloworld_ocl/
$ make clean
$ make TARGETS=hw DEVICES=$AWS_PLATFORM all
```
@@ -139,7 +137,7 @@ This assumes you have:
The [create_sdaccel_afi.sh](./tools/create_sdaccel_afi.sh) script is provided to facilitate AFI creation from a Xilinx FPGA Binary, it:
* Takes in your Xilinx FPGA Binary \*.xclbin file
-* Calls *aws ec2 create_fgpa_image* to generate an AFI under the hood
+* Calls *aws ec2 create_fpga_image* to generate an AFI under the hood
* Generates a \_afi_id.txt which contains the identifiers for your AFI
* Creates an AWS FPGA Binary file with an \*.awsxclbin extension that is composed of: Metadata and AGFI-ID.
* **This \*.awsxclbin is the AWS FPGA Binary file that will need to be loaded by your host application to the FPGA**
@@ -193,15 +191,14 @@ For help with AFI creation issues, see [create-fpga-image error codes](../hdk/do
# 3. Run the FPGA accelerated application on Amazon FPGA instances
-Here are the steps:
-* Start an FPGA instance using [FPGA Developer AMI on AWS Marketplace](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) and check the AMI [compatiability table](../README.md#devAmi) and [runtime compatilibility table](docs/Create_Runtime_AMI.md#runtime-ami-compatability-table). Alternatively, you can [create your own Runtime AMI](docs/Create_Runtime_AMI.md) for running your SDAccel applications on Amazon FPGA instances.
+* Start an FPGA instance using [FPGA Developer AMI on AWS Marketplace](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) and check the AMI [compatibility table](../README.md#fpga-developer-ami) and [runtime compatibility table](docs/Create_Runtime_AMI.md#runtime-ami-compatibility-table). Alternatively, you can [create your own Runtime AMI](docs/Create_Runtime_AMI.md) for running your SDAccel applications on Amazon FPGA instances.
* *Assuming the developer flow (compilation) was done on a separate instance you will need to:*
* Copy the compiled host executable (exe) to the new instance
* Copy the \*.awsxclbin AWS FPGA binary file to the new instance
- * Depending on the host code, the \*.awsxclbin may need to named .hw..awsxclbin . Ex: ```vector_addition.hw.xilinx_aws-vu9p-f1-04261818_dynamic_5_0.awsxclbin```
+ * Depending on the host code, the \*.awsxclbin may need to be named \.hw.\.awsxclbin. For example: ```vector_addition.hw.xilinx_aws-vu9p-f1-04261818_dynamic_5_0.awsxclbin```
* Copy any data files required for execution to the new instance
* [Clone the github repository to the new F1 instance and install runtime drivers](#gitsetenv)
- * Clone the github repository to the new F1 instance and install runtime drivers
+
```
$ git clone https://github.com/aws/aws-fpga.git $AWS_FPGA_REPO_DIR
$ cd $AWS_FPGA_REPO_DIR
@@ -212,7 +209,7 @@ Here are the steps:
* Source the Runtime Environment & Execute your Host Application:
```
- $ sudo sh
+ $ sudo -E /bin/bash
# source $AWS_FPGA_REPO_DIR/sdaccel_runtime_setup.sh # Other runtime env settings needed by the host app should be setup after this step
# ./helloworld
```
diff --git a/SDAccel/docs/Create_Runtime_AMI.md b/SDAccel/docs/Create_Runtime_AMI.md
index 0adf3f95..eadaf4ec 100644
--- a/SDAccel/docs/Create_Runtime_AMI.md
+++ b/SDAccel/docs/Create_Runtime_AMI.md
@@ -1,19 +1,20 @@
# Create a Runtime AMI Starting with an Amazon Linux AMI or Ubuntu
-## Runtime AMI Compatability Table
+## Runtime AMI Compatibility Table
| SDx Version used for AFI Development | Compatible SDAccel Runtime |
|--------------------------------------|-----------------------------|
| 2017.4 | Runtime installed by sourcing "sdaccel_setup.sh" while using HDK Ver 1.4.X when environment variable RELEASE_VER=2017.4 |
- | 2018.2 | AWS FPGA Developer AMI 1.5.0 ( XRT is pre-installed) or [Runtime installed with XRT Version 2.1.0](https://www.xilinx.com/html_docs/xilinx2018_2_xdf/sdaccel_doc/ejy1538090924727.html) |
- | 2018.3 | AWS FPGA Developer AMI 1.6.0 ( XRT is pre-installed) or [Runtime installed with XRT Version 2.1.0](https://xilinx.github.io/XRT/2018.3/html/build.html) |
+ | 2018.2 | AWS FPGA Developer AMI 1.5.0 (XRT is pre-installed) or [Runtime installed with XRT Version 2.1.0](https://www.xilinx.com/html_docs/xilinx2018_2_xdf/sdaccel_doc/ejy1538090924727.html) |
+ | 2018.3 | AWS FPGA Developer AMI 1.6.0 (XRT is pre-installed) or [Runtime installed with XRT Version 2.1.0](https://xilinx.github.io/XRT/2018.3/html/build.html) |
+ | 2019.1 | AWS FPGA Developer AMI 1.7.0 (XRT is pre-installed) or [Runtime installed with XRT Version 2.1.0](https://xilinx.github.io/XRT/2019.1/html/build.html) |
## 1. Launch a Runtime Instance & Install Required Packages
* Please note Amazon Linux 2 or Amazon Linux are not supported by Xilinx XRT at this time. Please use Centos/RHEL or Ubuntu when using Xilinx XRT Runtimes for the AFIs generated using Xilinx SDx 2018.2 and 2018.3 toolsets.
-* Launch an F1 instance using an [Amazon Linux AMI](https://aws.amazon.com/marketplace/pp/B00635Y2IW) or [Centos 7](https://aws.amazon.com/marketplace/pp/B00O7WM7QW)
-* Install the required updates
+* Launch an F1 instance using [Centos 7](https://aws.amazon.com/marketplace/pp/B00O7WM7QW) or Amazon Linux AMIs
+* Update to get the latest packages.
````
$ sudo yum update
@@ -54,11 +55,11 @@
* Using an instance running [FPGA Developer AMI](https://aws.amazon.com/marketplace/pp/B06VVYBLZZ) or an on-premises machine with access to a Xilinx SDAccel Tools Installation, first source $AWS_FPGA_REPO_DIR/sdaccel_setup.sh and then run following commands:
-* if using Ubuntu or debian distribution set GLIBPATH env variable to Ubuntu. If using any other OS distribution set GLIBPATH to default.
+* If using Ubuntu or Debian distributions set GLIBPATH env variable to Ubuntu. If using any other OS distribution set GLIBPATH to default.
-* set env variable 'XLNXRTE' to intended runtime install directory path.
+* Set env variable 'XLNXRTE' to intended runtime install directory path.
-### **For Vivado SDX 2017.4**
+### **Xilinx SDx 2017.4:**
````
$ export GLIBPATH=
@@ -67,8 +68,8 @@
$ mkdir -p $XLNXRTE/lib/lnx64.o
$ mkdir -p $XLNXRTE/runtime/bin
$ mkdir -p $XLNXRTE/runtime/lib/x86_64
- $ cp $SDACCEL_DIR/userspace/src2/libxrt-aws.so $XLNXRTE/runtime/platforms/xilinx_aws-vu9p-f1-04261818_dynamic_5_0/driver/
- $ cp $SDACCEL_DIR/tools/awssak2/xbsak $XLNXRTE/runtime/bin/
+ $ cp $SDACCEL_DIR/userspace/src/libxrt-aws.so $XLNXRTE/runtime/platforms/xilinx_aws-vu9p-f1-04261818_dynamic_5_0/driver/
+ $ cp $SDACCEL_DIR/tools/awssak/xbsak $XLNXRTE/runtime/bin/
$ cp $XIILNX_SDX/lib/lnx64.o/$GLIBPATH/libstdc++.so* xlnxrte/lib/x86_64/
$ cp $XIILNX_SDX/runtime/bin/xclbinsplit xlnxrte/runtime/bin/
$ cp $XIILNX_SDX/runtime/bin/xclbincat xlnxrte/runtime/bin/
@@ -79,14 +80,12 @@
* You may need to update path in $XLNXRTE/setup.sh and $XLNXRTE/setup.csh script to match your runtime instance.
* Copy $XLNXRTE directory created to $HOME on your Runtime Instance.
-### **For Vivado SDX 2018.2**
-
- Please refer [installing Xilinx SDx 2018.2 XRT](https://www.xilinx.com/html_docs/xilinx2018_2_xdf/sdaccel_doc/ejy1538090924727.html) for instructions on how to install XRT on your AMI.
+### **Xilinx SDx 2018.2:** [Install 2018.2 XRT](https://www.xilinx.com/html_docs/xilinx2018_2_xdf/sdaccel_doc/ejy1538090924727.html).
-### **For Vivado SDX 2018.3**
-
- Please refer [installing Xilinx SDx 2018.3 XRT](https://xilinx.github.io/XRT/2018.3/html/build.html) for instructions on how to install runtime on your AMI.
+### **Xilinx SDx 2018.3:** [Install 2018.3 XRT](https://xilinx.github.io/XRT/2018.3/html/build.html).
+### **Xilinx SDx 2019.1:** [Install 2019.1 XRT](https://xilinx.github.io/XRT/2019.1/html/build.html).
+
## 3. Install Runtime Drivers and run your FPGA accelerated application on your Runtime Instance.
* Log back on to the Runtime Instance:
diff --git a/SDAccel/docs/README_GUI.md b/SDAccel/docs/README_GUI.md
index 0818eaac..4d3f5485 100644
--- a/SDAccel/docs/README_GUI.md
+++ b/SDAccel/docs/README_GUI.md
@@ -7,7 +7,7 @@ The guide explains how to:
1. Verify the application
1. Build the application to execute on FPGA hardware
-**Note**: It is highly recommended to review the [SDAccel Guide][SDAccel_Guide] to fully understand the SDAccel flow before using the GUI.
+**Note**: It is highly recommended to review the [AWS F1 SDAccel Guide](SDAccel_Guide_AWS_F1.md) to fully understand the SDAccel flow before using the GUI.
## Cloning the aws-fpga Git repository
The AWS Github repository contains the example used in this tutorial.
@@ -29,13 +29,9 @@ The SDAccel examples from the github are downloaded by the above steps. However,
First change directory to **helloworld_ocl** example.
```
- $ cd /SDAccel/examples/xilinx_2017.4/getting_started/host/helloworld_ocl
-```
-The github examples use common header files and those needs to be copied in the local project source folder to make it easier to use.
-Type the command **make local-files** to copy all necessary files in the local directory.
-```
- $ make local-files
+ $ cd /SDAccel/examples/xilinx/getting_started/hello_world/helloworld_ocl
```
+
The SDAccel GUI is invoked with the **sdx** command.
```
@@ -46,9 +42,9 @@ To debug using gdb inside from SDX gui few additional commands are needed to exe
```
$ mv /usr/local/Modules/init init.bak
- $ unset –f switchml
- $ unset –f _moduleraw
- $ unset –f module
+ $ unset -f switchml
+ $ unset -f _moduleraw
+ $ unset -f module
$ sdx
```
@@ -62,7 +58,7 @@ We will now cover the following steps:
Add workspace inside the current directory named "GUI_test" as shown below. A new directory **GUI_test** will be created and used to store all logfiles of our runs.
-
+
@@ -71,7 +67,7 @@ You will get a Welcome screen. You need to set Platform path by selecting **Add
-
+
@@ -79,7 +75,7 @@ Click on the **plus** sign as shown below.
-
+
diff --git a/SDAccel/docs/SDAccel_Guide_AWS_F1.md b/SDAccel/docs/SDAccel_Guide_AWS_F1.md
index 98cffd63..66bf0a73 100644
--- a/SDAccel/docs/SDAccel_Guide_AWS_F1.md
+++ b/SDAccel/docs/SDAccel_Guide_AWS_F1.md
@@ -164,42 +164,19 @@ Conversely, code which is simply a few lines of basic operations, and has no tas
# Additional Resources
-The [AWS SDAccel README].
-
-Xilinx web portal for [Xilinx SDAccel documentation] and for [Xilinx SDAccel GitHub repository]
-
-
-Links pointing to **2017.4** version of the user guides
-1. [UG1023: SDAccel Environment User Guide][UG1023 2017.4]
-1. [UG1021: SDAccel Environment Tutorial: Getting Started Guide (including emulation/build/running on H/W flow)][UG1021 2017.4]
-1. [UG1207: SDAccel Environment Optimization Guide][UG1207 2017.4]
-1. [UG949: UltraFast Design Methodology Guide for the Vivado Design Suite][UG949 2017.4]
-1. [UG1238: SDx Development Environment Release Notes, Installation, and Licensing Guide][UG1238 2017.4]
-
-
-
-[SDAccel_landing_page]: https://www.xilinx.com/products/design-tools/software-zone/sdaccel.html
-[VHLS_landing_page]: https://www.xilinx.com/products/design-tools/vivado/integration/esl-design.html
-[Vivado_landing_page]: https://www.xilinx.com/products/design-tools/vivado.html
-
-[SDAccel Environment User Guide]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1023-sdaccel-user-guide.pdf
-[UG1021]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1021-sdaccel-intro-tutorial.pdf
-[SDAccel Environment Optimization Guide]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1207-sdaccel-optimization-guide.pdf
-[UG949]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug949-vivado-design-methodology.pdf
-[UG902]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug902-vivado-high-level-synthesis.pdf
-
-[UG1023 2017.4]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1023-sdaccel-user-guide.pdf
-[UG1021 2017.4]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1021-sdaccel-intro-tutorial.pdf
-[UG1207 2017.4]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1207-sdaccel-optimization-guide.pdf
-[UG1238 2017.4]:http://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1238-sdx-rnil.pdf
-[Xilinx SDAccel documentation]: https://www.xilinx.com/products/design-tools/software-zone/sdaccel.html#documentation
-[Xilinx SDAccel GitHub repository]: https://github.com/Xilinx/SDAccel_Examples
-[UG949 2017.4]: https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug949-vivado-design-methodology.pdf
-
-[AWS SDAccel Readme]: ../README.md
-[OnPremiseDev]: ./On_Premises_Development_Steps.md
-[Power_Analysis]: ./SDAccel_Power_Analysis.md
-[GUI_README]: ./README_GUI.md
-[FAQ]:../FAQ.md
+* The [AWS SDAccel README](../README.md).
+* Xilinx web portal for [Xilinx SDAccel documentation](https://www.xilinx.com/products/design-tools/software-zone/sdaccel.html?resultsTablePreSelect=xlnxdocumenttypes:SeeAll#documentation)
+* [Xilinx SDAccel GitHub repository](https://github.com/Xilinx/SDAccel_Examples)
+* [Xilinx SDAccel landing page](https://www.xilinx.com/products/design-tools/software-zone/sdaccel.html)
+* [Vivado HLS landing page](https://www.xilinx.com/products/design-tools/vivado/integration/esl-design.html)
+* [Vivado landing page](https://www.xilinx.com/products/design-tools/vivado.html)
+* [SDAccel Environment User Guide](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1023-sdaccel-user-guide.pdf)
+* [SDAccel Intro Tutorial](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1021-sdaccel-intro-tutorial.pdf)
+* [SDAccel Environment Optimization Guide](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug1207-sdaccel-optimization-guide.pdf)
+* [UltraFast Design Methodology Guide](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug949-vivado-design-methodology.pdf)
+* [Vivado High Level Synthesis User Guide](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2017_4/ug902-vivado-high-level-synthesis.pdf)
+* [On Premise Development steps](On_Premises_Development_Steps.md)
+* [SDAccel Power Analysis](SDAccel_Power_Analysis.md)
+* [FAQ](../FAQ.md)
diff --git a/SDAccel/docs/SDAccel_HLS_Debug.md b/SDAccel/docs/SDAccel_HLS_Debug.md
index dffbba14..6b0d4901 100755
--- a/SDAccel/docs/SDAccel_HLS_Debug.md
+++ b/SDAccel/docs/SDAccel_HLS_Debug.md
@@ -1,4 +1,4 @@
-# Debug HLS Performance: Limited memory ports.
+# Debug HLS Performance: Limited memory ports
In an ideal FPGA implementation, the kernel will process 1 data sample per clock cycle. In the High-Level Synthesis (HLS) technology used in SDAccel, this is referred to an II=1 implementation, where II is the Initiation Interval of design, or the number of clock cycles before the design can read new data inputs.
diff --git a/SDAccel/docs/SDAccel_Migrate_dynamic_DSA.md b/SDAccel/docs/SDAccel_Migrate_dynamic_DSA.md
index 7dac1ee4..b48b9afe 100644
--- a/SDAccel/docs/SDAccel_Migrate_dynamic_DSA.md
+++ b/SDAccel/docs/SDAccel_Migrate_dynamic_DSA.md
@@ -47,7 +47,7 @@ set_property sdx_kernel_type rtl [ipx::current_core]
* Profiling hardware no longer pre-built in the platform. Instead, it is added compile time to the design.
* This requires an update to the xocc command options.
* (2017.4) Add the -profile_kernel option the xocc command to enable profile instrumentation when compiling the kernel; set profile=true in the sdaccel.ini file to collect profile data when running the application.
-
+ * (2019.1) Add the -profile_kernel option to the xocc command to enable profile instrumentation when compiling the kernel; set profile=true in the xrt.ini file to collect profile data when running the application.
## Additional resources
* [SDAccel Development Enviroment - Changes for 2017.4](https://www.xilinx.com/html_docs/xilinx2017_4/sdaccel_doc/jdl1512623841682.html)
* [SDAccel Development Enviroment - Whats new for 2017.4](https://www.xilinx.com/html_docs/xilinx2017_4/sdaccel_doc/rke1512623904797.html)
diff --git a/SDAccel/docs/Setup_AWS_CLI_and_S3_Bucket.md b/SDAccel/docs/Setup_AWS_CLI_and_S3_Bucket.md
index 2b1bdeb4..288e6527 100644
--- a/SDAccel/docs/Setup_AWS_CLI_and_S3_Bucket.md
+++ b/SDAccel/docs/Setup_AWS_CLI_and_S3_Bucket.md
@@ -1,7 +1,7 @@
## Setup CLI and Create S3 Bucket
-The developer is required to create a S3 bucket for the AFI generation. The bucket will contain a tar file and logs which are generated from the AFI creation service.
+The developer is required to create an S3 bucket for the AFI generation. The bucket will contain a tar file and logs which are generated from the AFI creation service.
-To install the AWS CLI, please follow the instructions here: (http://docs.aws.amazon.com/cli/latest/userguide/installing.html).
+To install the AWS CLI, please follow the [instructions here](http://docs.aws.amazon.com/cli/latest/userguide/installing.html).
The AWS SDAccel scripts require JSON output format and the scripts will not work properly if you use any other output format types (ex: text, table). JSON is the default output format of the AWS CLI.
diff --git a/SDAccel/docs/XRT_installation_instructions.md b/SDAccel/docs/XRT_installation_instructions.md
index c0125c13..a6503c98 100644
--- a/SDAccel/docs/XRT_installation_instructions.md
+++ b/SDAccel/docs/XRT_installation_instructions.md
@@ -1,122 +1,61 @@
-# XRT Installation Instructions
-
-# Installing Xilinx Runtime (XRT) 2018.3 RC3 Patch 1
-
- * Applicable SDx Tool Version: 2018.3
-
- * XRT Release Tag: 2018.3.3.1 (SHA: 48cafdc100b29843fd013d371ffba0141db06b7a)
-
- * [Xilinx Runtime (XRT) 2018.3 RC3 Patch 1 release](https://github.com/Xilinx/XRT/releases/tag/2018.3.3.1)
-
- ### Instructions to build & install XRT
-
- Pre-requisite commands used to build XRT for AWS F1 platform for this release
-
- ```
- git clone http://www.github.com/aws/aws-fpga.git
- cd aws-fpga
- source sdaccel_setup.sh
- mkdir $SDACCEL_DIR/Runtime
- cd $SDACCEL_DIR/Runtime
- export XRT_PATH="${SDACCEL_DIR}/Runtime/XRT_20183rc3p1 "
- git clone http://www.github.com/Xilinx/XRT.git -b 2018.3.3.1 ${XRT_PATH}
- cd ${XRT_PATH}
- sudo ./src/runtime_src/tools/scripts/xrtdeps.sh
- cd build
-
- ```
-
- Follow [Xilinx's instructions to build & install XRT on Centos/Redhat & Ubuntu/Debian](https://xilinx.github.io/XRT/master/html/build.html#xrt-for-pcie-platforms) to build XRT for supported OS.
-
- ### Install on Centos/RedHat Linux using prebuilt RPM
-
- ```
- curl -s https://s3.amazonaws.com/aws-fpga-developer-ami/1.6.0/Patches/XRT_2018_3_RC3_Patch1/xrt_201803.2.1.0_7.5.1804-xrt.rpm -o xrt_201803.2.1.0_7.5.1804-xrt.rpm
- curl -s https://s3.amazonaws.com/aws-fpga-developer-ami/1.6.0/Patches/XRT_2018_3_RC3_Patch1/xrt_201803.2.1.0_7.5.1804-aws.rpm -o xrt_201803.2.1.0_7.5.1804-aws.rpm
- sudo yum remove -y xrt-aws
- sudo yum remove -y xrt
- sudo yum install -y xrt_201803.2.1.0_7.5.1804-xrt.rpm
- sudo yum install -y xrt_201803.2.1.0_7.5.1804-aws.rpm
-
- ```
-
-# Installing Xilinx Runtime (XRT) 2018.2_XDF.RC4
-
- * Applicable SDx Tool Version: 2018.2
-
- * XRT Release Tag: 2018.2_XDF.RC4 (SHA: 343186f76f59edd01bc48d84cf67fe22a0a3f338)
-
- * [Xilinx Runtime (XRT) 2018.2_XDF.RC4 release](https://github.com/Xilinx/XRT/tree/2018.2_XDF.RC4)
-
- ### Instructions to build & install XRT
-
- Pre-requisite commands used to build XRT for AWS F1 platform for this release
-
- ```
- git clone http://www.github.com/aws/aws-fpga.git
- cd aws-fpga
- source sdaccel_setup.sh
- mkdir $SDACCEL_DIR/Runtime
- cd $SDACCEL_DIR/Runtime
- export XRT_PATH="${SDACCEL_DIR}/Runtime/XRT_20182rc4"
- git clone http://www.github.com/Xilinx/XRT.git -b 2018.2_XDF.RC4 ${XRT_PATH}
- cd ${XRT_PATH}
- sudo ./src/runtime_src/tools/scripts/xrtdeps.sh
- cd build
-
- ```
- Follow [ Xilinx's instructions to build & install XRT on Centos/RedHat & Ubuntu/Debian](https://www.xilinx.com/html_docs/xilinx2018_2_xdf/sdaccel_doc/ejy1538090924727.html) to build XRT for supported OS.
-
- ### Install on Centos/RedHat Linux using prebuilt RPMs
-
- Run following commands to download and install XRT 2018.2_XDF.RC4 for 'Centos/RHEL'
-
- ```
- curl -s https://s3.amazonaws.com/aws-fpga-developer-ami/1.5.0/Patches/xrt_201802.2.1.0_7.5.1804-xrt.rpm -o xrt_201802.2.1.0_7.5.1804-xrt.rpm
- curl -s https://s3.amazonaws.com/aws-fpga-developer-ami/1.5.0/Patches/xrt_201802.2.1.0_7.5.1804-aws.rpm -o xrt_201802.2.1.0_7.5.1804-aws.rpm
- sudo yum remove -y xrt
- sudo yum install -y xrt_201802.2.1.0_7.5.1804-xrt.rpm
- sudo yum install -y xrt_201802.2.1.0_7.5.1804-aws.rpm
-
- ```
-
-# Installing Xilinx Runtime (XRT) 2018.2_XDF.RC5
-
- * Applicable SDx Tool Version: 2018.2
-
- * XRT Release Tag: 2018.2_XDF.RC5 (SHA: 65ffad62f427c0bd1bc65b6ea555a810295468b7)
-
- * [Xilinx Runtime (XRT) 2018.2_XDF.RC5 release](https://github.com/Xilinx/XRT/releases/tag/2018.2_XDF.RC5)
-
- ### Instructions to build & install XRT
-
- Pre-requisite commands used to build XRT for AWS F1 platform for this release
-
- ```
- git clone http://www.github.com/aws/aws-fpga.git
- cd aws-fpga
- source sdaccel_setup.sh
- mkdir $SDACCEL_DIR/Runtime
- cd $SDACCEL_DIR/Runtime
- export XRT_PATH="${SDACCEL_DIR}/Runtime/XRT_20182rc5 "
- git clone http://www.github.com/Xilinx/XRT.git -b 2018.2_XDF.RC5 ${XRT_PATH}
- cd ${XRT_PATH}
- sudo ./src/runtime_src/tools/scripts/xrtdeps.sh
- cd build
-
- ```
- Follow [ Xilinx's instructions to build & install XRT on Centos/RedHat & Ubuntu/Debian](https://www.xilinx.com/html_docs/xilinx2018_2_xdf/sdaccel_doc/ejy1538090924727.html) to build XRT for supported OS.
-
- ### Install on Centos/RedHat Linux using prebuilt RPMs
-
- Run following commands to download and install XRT 2018.2_XDF.RC5 for 'Centos/RHEL'
-
- ```
- curl -s https://s3.amazonaws.com/aws-fpga-developer-ami/1.5.0/Patches/XRT_2018_2_XDF_RC5/xrt_201802.2.1.0_7.5.1804-xrt.rpm -o xrt_201802.2.1.0_7.5.1804-xrt.rpm
- curl -s https://s3.amazonaws.com/aws-fpga-developer-ami/1.5.0/Patches/XRT_2018_2_XDF_RC5/xrt_201802.2.1.0_7.5.1804-aws.rpm -o xrt_201802.2.1.0_7.5.1804-aws.rpm
- sudo yum remove -y xrt-aws
- sudo yum remove -y xrt
- sudo yum install -y xrt_201802.2.1.0_7.5.1804-xrt.rpm
- sudo yum install -y xrt_201802.2.1.0_7.5.1804-aws.rpm
-
- ```
+# Xilinx Runtime (XRT) and SDx Tool versions
+
+* The Xilinx Runtime (XRT) version must match the SDx tool version that you created your SDAccel AFI with.
+* We provide pre-built RPMs for Centos/RHEL and instructions for building XRT.
+* Use the table below as a reference to install and use the correct XRT version for your applications.
+
+| Xilinx SDx Tool Version | XRT Release Tag | SHA | `xrt` and `xrt-aws` pre-built RPMs (Centos/RHEL) |
+|---|---|---|---|
+|2019.1| [2019.1.0.3](https://github.com/Xilinx/XRT/tree/2019.1.0.3) | 89e25d51313daac5c322dfb4e84707829306d3fe | [xrt_201910.2.2.0_7.7.1908-xrt.rpm](https://aws-fpga-developer-ami.s3.amazonaws.com/1.7.0/Patches/XRT_2019_1_0_3/xrt_201910.2.2.0_7.7.1908-xrt.rpm) [xrt_201910.2.2.0_7.7.1908-aws.rpm](https://aws-fpga-developer-ami.s3.amazonaws.com/1.7.0/Patches/XRT_2019_1_0_3/xrt_201910.2.2.0_7.7.1908-aws.rpm) |
+|2018.3| [2018.3_RC5](https://github.com/Xilinx/XRT/releases/tag/2018.3_RC5) | 8654da1f0d2bd196c9887bdcfe1479103a93e90a | [xrt_201830.2.1.0_7.6.1810-xrt.rpm](https://aws-fpga-developer-ami.s3.amazonaws.com/1.6.0/Patches/XRT_2018_3_RC5/xrt_201830.2.1.0_7.6.1810-xrt.rpm) [xrt_201830.2.1.0_7.6.1810-aws.rpm](https://aws-fpga-developer-ami.s3.amazonaws.com/1.6.0/Patches/XRT_2018_3_RC5/xrt_201830.2.1.0_7.6.1810-aws.rpm) |
+|2018.2| [2018.2_XDF.RC5](https://github.com/Xilinx/XRT/releases/tag/2018.2_XDF.RC5) | 65ffad62f427c0bd1bc65b6ea555a810295468b7 | [xrt_201802.2.1.0_7.5.1804-xrt.rpm](https://aws-fpga-developer-ami.s3.amazonaws.com/1.5.0/Patches/XRT_2018_2_XDF_RC5/xrt_201802.2.1.0_7.5.1804-xrt.rpm) [xrt_201802.2.1.0_7.5.1804-aws.rpm](https://aws-fpga-developer-ami.s3.amazonaws.com/1.5.0/Patches/XRT_2018_2_XDF_RC5/xrt_201802.2.1.0_7.5.1804-aws.rpm) |
+|2017.4| N/A** | N/A** | N/A** |
+** Use XOCL for 2017.4
+
+# Centos/RHEL build and install steps
+
+```bash
+XRT_RELEASE_TAG=2019.1_RC2 # Substitute XRT_RELEASE_TAG=<XRT Release Tag from the table above>
+
+git clone https://github.com/aws/aws-fpga.git
+
+cd aws-fpga
+source sdaccel_setup.sh
+cd $SDACCEL_DIR/Runtime
+export XRT_PATH="${SDACCEL_DIR}/Runtime/${XRT_RELEASE_TAG}"
+git clone http://www.github.com/Xilinx/XRT.git -b ${XRT_RELEASE_TAG} ${XRT_PATH}
+
+cd ${XRT_PATH}
+sudo ./src/runtime_src/tools/scripts/xrtdeps.sh
+
+cd build
+scl enable devtoolset-6 bash
+./build.sh
+
+cd Release
+sudo yum reinstall xrt_*.rpm -y
+```
+
+# Centos/RHEL pre-built RPM install steps
+
+### 2019.1
+
+```bash
+curl -s https://aws-fpga-developer-ami.s3.amazonaws.com/1.7.0/Patches/XRT_2019_1_RC2/xrt_201910.2.2.0_7.6.1810-xrt.rpm -o xrt.rpm
+curl -s https://aws-fpga-developer-ami.s3.amazonaws.com/1.7.0/Patches/XRT_2019_1_RC2/xrt_201910.2.2.0_7.6.1810-aws.rpm -o xrt-aws.rpm
+sudo yum reinstall xrt*.rpm -y
+```
+### 2018.3
+
+```bash
+curl -s https://aws-fpga-developer-ami.s3.amazonaws.com/1.6.0/Patches/XRT_2018_3_RC5/xrt_201830.2.1.0_7.6.1810-xrt.rpm -o xrt.rpm
+curl -s https://aws-fpga-developer-ami.s3.amazonaws.com/1.6.0/Patches/XRT_2018_3_RC5/xrt_201830.2.1.0_7.6.1810-aws.rpm -o xrt-aws.rpm
+sudo yum reinstall xrt*.rpm -y
+```
+### 2018.2
+
+```bash
+curl -s https://aws-fpga-developer-ami.s3.amazonaws.com/1.5.0/Patches/XRT_2018_2_XDF_RC5/xrt_201802.2.1.0_7.5.1804-xrt.rpm -o xrt.rpm
+curl -s https://aws-fpga-developer-ami.s3.amazonaws.com/1.5.0/Patches/XRT_2018_2_XDF_RC5/xrt_201802.2.1.0_7.5.1804-aws.rpm -o xrt-aws.rpm
+sudo yum reinstall xrt*.rpm -y
+```
diff --git a/SDAccel/docs/figure/gui_fig_1.png b/SDAccel/docs/figure/gui_fig_1.png
new file mode 100644
index 00000000..b98b468a
Binary files /dev/null and b/SDAccel/docs/figure/gui_fig_1.png differ
diff --git a/SDAccel/docs/figure/gui_fig_2.png b/SDAccel/docs/figure/gui_fig_2.png
new file mode 100644
index 00000000..884371fb
Binary files /dev/null and b/SDAccel/docs/figure/gui_fig_2.png differ
diff --git a/SDAccel/docs/figure/gui_fig_3.png b/SDAccel/docs/figure/gui_fig_3.png
new file mode 100644
index 00000000..83d61d20
Binary files /dev/null and b/SDAccel/docs/figure/gui_fig_3.png differ
diff --git a/SDAccel/docs/README_third_party.md b/SDAccel/examples/3rd_party/README.md
similarity index 80%
rename from SDAccel/docs/README_third_party.md
rename to SDAccel/examples/3rd_party/README.md
index 1e8fc873..d50cca85 100644
--- a/SDAccel/docs/README_third_party.md
+++ b/SDAccel/examples/3rd_party/README.md
@@ -2,7 +2,6 @@
* In the interest of providing more examples for the user, we present this guide that tells how to port third party OpenCL examples to the SDAccel flow.
* In this guide, we show the changes necessary to port third party host code and kernel code for 2 different examples.
* We also show some [differences between the third party OpenCL and Xilinx SDAccel implementations](#xilinx-and-third-party-implementation-differences) that the user should be aware of.
-* There is a third example (matrix_mult, not discussed here) available at SDAccel/examples/3rd_party.
## The file structure of the third party examples used in this guide.
* The following shows the common file structure of the third party examples used in this guide.
@@ -27,16 +26,16 @@ common/src/AOCLUtils/options.cpp
## Changes to the host code.
- * The changes needed for the **vector_addition** host code can be found [here](../examples/3rd_party/vector_addition) in the file named vector_addition_main.cpp.diff.
- * The changes needed for the **fft1d** host code can be found [here](../examples/3rd_party/fft1d) in the file named fft1d_main.cpp.diff.
- * All the modified dependency files can be found in the [SDAccel/examples/3rd_party/common](../examples/3rd_party/common) directory.
+ * The changes needed for the **vector_addition** host code can be found [here](vector_addition) in the file named vector_addition_main.cpp.diff.
+ * The changes needed for the **fft1d** host code can be found [here](fft1d) in the file named fft1d_main.cpp.diff.
+ * All the modified dependency files can be found in the [SDAccel/examples/3rd_party/common](common) directory.
## Changes to the kernel code.
* The kernel code, found in the <example_name>/device directory, will most likely need modifications.
* The **vector addition** kernel does not need changes.
* The **fft1d** example needs several changes due to the differences between the third party and Xilinx implementations.
-* The changes needed for the fft1d.cl file are found [here](../examples/3rd_party/fft1d) in the file named fft1d_fft1d.cl.diff.
+* The changes needed for the fft1d.cl file are found [here](fft1d) in the file named fft1d_fft1d.cl.diff.
* See table below regarding [implementation differences between third party and Xilinx](#xilinx-and-third-party-implementation-differences).
* The <example_name>/device/twid_radix4_8.cl file will get many warnings about casting from double to float.
@@ -46,18 +45,18 @@ common/src/AOCLUtils/options.cpp
sed 's/\([0-9]\)\( \{0,\}[,}]\)/\1f\2/g' twid_radix4_8.cl > tmp
mv tmp twid_radix4_8.cl
```
-* The script above can be found [here](../examples/3rd_party/fft1d) named cast_float_const.sh.
+* The script above can be found [here](fft1d) named cast_float_const.sh.
## Changes to the Makefile.
* The third party Makefile can be replaced by a version that is similar to the SDAccel example Makefiles.
-* For example, for the third party **vector_addition** code, the Makefile can be found [here](../examples/3rd_party/vector_addition).
-* The **fft1d** example Makefile can be found [here](../examples/3rd_party/fft1d).
+* For example, for the third party **vector_addition** code, the Makefile can be found [here](vector_addition).
+* The **fft1d** example Makefile can be found [here](fft1d).
## Compiling and running.
* The steps to compile and run would be the same as those used for the SDAccel examples with the exception that the host program would need the -hw=<mode> switch when running in emulation mode.
-* For the complete guide on compiling and running the SDAccel examples, see [this](../README.md).
+* For the complete guide on compiling and running the SDAccel examples, see [this](../../README.md).
* To run in software emulation mode, use the following commands.
```
@@ -80,8 +79,7 @@ make TARGETS=hw DEVICES=$AWS_PLATFORM all
./main
```
-* For more information on running this example on an F1 instance, see [this](../README.md#runonf1).
-
+* For more information on running this example on an F1 instance, see [this](../../README.md#runonf1).
## Xilinx and third party Implementation Differences
#### Host Code
@@ -105,18 +103,12 @@ make TARGETS=hw DEVICES=$AWS_PLATFORM all
| declares and initializes an struct object together | declare an struct object and then initialize it separately |
## SUPPORT
-For more information check here:
-[SDAccel User Guides][]
+For more information check the [SDAccel User Guides](http://www.xilinx.com/support/documentation-navigation/development-tools/software-development/sdaccel.html?resultsTablePreSelect=documenttype:SeeAll#documentation)
-For questions and to get help on this project or your own projects, visit the [SDAccel Forums][].
+For questions and to get help on this project or your own projects, visit the [SDAccel Forums](https://forums.xilinx.com/t5/SDAccel/bd-p/SDx)
## REVISION HISTORY
Date | Readme Version | Revision Description
--------|----------------|-------------------------
SEP2017 | 1.0 | Initial release
-
-
-
-[SDAccel Forums]: https://forums.xilinx.com/t5/SDAccel/bd-p/SDx
-[SDAccel User Guides]: http://www.xilinx.com/support/documentation-navigation/development-tools/software-development/sdaccel.html?resultsTablePreSelect=documenttype:SeeAll#documentation
diff --git a/SDAccel/examples/3rd_party/matrix_mult/Makefile b/SDAccel/examples/3rd_party/matrix_mult/Makefile
deleted file mode 100644
index d459aff2..00000000
--- a/SDAccel/examples/3rd_party/matrix_mult/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-## Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-##
-## Licensed under the Amazon Software License (the "License"). You may not use
-## this file except in compliance with the License. A copy of the License is
-## located at
-##
-## http://aws.amazon.com/asl/
-##
-## or in the "license" file accompanying this file. This file is distributed on
-## an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
-## implied. See the License for the specific language governing permissions and
-## limitations under the License.
-
-
-COMMON_REPO := $(SDACCEL_DIR)/examples/xilinx
-
-include $(COMMON_REPO)/utility/boards.mk
-include $(COMMON_REPO)/libs/xcl/xcl.mk
-include $(COMMON_REPO)/libs/opencl/opencl.mk
-
-main_SRCS=$(wildcard host/src/*.cpp ../common/src/AOCLUtils/*.cpp) $(xcl_SRCS)
-main_HDRS=$(xcl_HDRS)
-
-main_CXXFLAGS=$(xcl_CXXFLAGS) $(opencl_CXXFLAGS) -Ihost/inc/ -I../common/inc/
-main_LDFLAGS=$(opencl_LDFLAGS) -lrt
-
-EXES=main
-
-# Kernel
-matrix_mult_SRCS=./device/matrix_mult.cl
-matrix_mult_CLFLAGS= -k matrix_mult -Ihost/inc/
-#Specifyinng Fifo depth for Dataflow
-matrix_mult_CLFLAGS+=--xp "param:compiler.xclDataflowFifoDepth=32"
-
-XOS=matrix_mult
-
-# xclbin
-matrix_mult_XOS=matrix_mult
-
-XCLBINS=matrix_mult
-
-# check
-check_EXE=main
-check_XCLBINS=matrix_mult
-
-CHECKS=check
-
-# Compilation flags
-ifeq ($(DEBUG),1)
-CXXFLAGS += -g
-else
-CXXFLAGS += -O2
-endif
-
-# Compiler
-#CXX := g++
-
-include $(COMMON_REPO)/utility/rules.mk
diff --git a/SDAccel/examples/3rd_party/matrix_mult/README.md b/SDAccel/examples/3rd_party/matrix_mult/README.md
deleted file mode 100644
index 228c3161..00000000
--- a/SDAccel/examples/3rd_party/matrix_mult/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Third party matrix multiply OpcnCL example.
-## Compiling and running.
-* The steps to compile and run are the same as those used for the SDAccel examples with the exception that the host program would need the -hw=<mode> switch when running in emulation mode.
-
-
-* For the complete guide on compiling and running the SDAccel examples, see [this](../../../README.md).
-
-* To run in software emulation mode, use the following commands.
- ```
-make clean
-source $XILINX_SDX/settings64.sh
-make TARGETS=sw_emu DEVICES=$AWS_PLATFORM all
-./main -hw=sw_emu
-```
-
-* To run in hardware emulation mode, use the following commands.
- ```
-make clean
-source $XILINX_SDX/settings64.sh
-make TARGETS=hw_emu DEVICES=$AWS_PLATFORM all
-./main -hw=hw_emu
-```
-
-* To run on an F1 instance, use the following commands.
- ```
-make clean
-source $XILINX_SDX/settings64.sh
-make TARGETS=hw DEVICES=$AWS_PLATFORM all
-./main
-```
-
-* For more information on running this example on an F1 instance, see [this](../../../README.md#runonf1).
\ No newline at end of file
diff --git a/SDAccel/examples/3rd_party/matrix_mult/device/matrix_mult.cl b/SDAccel/examples/3rd_party/matrix_mult/device/matrix_mult.cl
deleted file mode 100644
index 42dc429f..00000000
--- a/SDAccel/examples/3rd_party/matrix_mult/device/matrix_mult.cl
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (C) 2013-2016 Altera Corporation, San Jose, California, USA. All rights reserved.
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this
-// software and associated documentation files (the "Software"), to deal in the Software
-// without restriction, including without limitation the rights to use, copy, modify, merge,
-// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to
-// whom the Software is furnished to do so, subject to the following conditions:
-// The above copyright notice and this permission notice shall be included in all copies or
-// substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-// OTHER DEALINGS IN THE SOFTWARE.
-//
-// This agreement shall be governed in all respects by the laws of the State of California and
-// by the laws of the United States of America.
-
-// This kernel computes C = A * B, where
-// A is a N x K matrix
-// B is a K x M matrix
-// C is a N x M matrix
-// All dimensions must be a multiple of BLOCK_SIZE (defined below).
-//
-// The ND-range is two-dimensional and corresponds to the dimensions of matrix
-// C. Each work-item computes one element of the output matrix.
-//
-// The implemented algorithm uses blocking to take advantage of data reuse
-// across multiple elements in matrix C. This is just like the standard loop
-// tiling optimization often used in matrix multiplication implementations.
-//
-// This kernel is intended to be compiled with the following compiler flags:
-// --no-interleaving default
-// This flag indicates that the global memory is divided into two logical
-// banks and allows the host program to assign buffers to specific buffers.
-// This allows the host to manage the load on each memory bank, usually
-// to maximize the memory bandwidth usage.
-//
-// This flag is used for matrix multiplication because there are
-// two primary memory accesses: reads from matrix A and reads from
-// matrix B. To maximize memory bandwidth, the two input matrices
-// are placed in different memory banks, which ensures that there is no
-// contention when trying to read elements from both matrices
-// simultaneously.
-//
-// -fp-relaxed=true
-// This flag enables the order of additions in the dot product
-// computation within a block to be rearranged. This enables the additions
-// to be computed more efficiently in hardware, using a tree structure
-// instead of a vine.
-//
-// As a simple example, take the addition of four values: a0 + a1 + a2 + a3.
-// The default implementation (without -fp-relaxed=true) is:
-// (((a0 + a1) + a2) + a3)
-// which matches the standard ordering of operations. In hardware, this
-// looks like:
-// a0 a1
-// |-+-|
-// | a2
-// |-+-|
-// | a3
-// |-+-|
-// |
-//
-// With -fp-relaxed=true, the implementation is a balanced tree:
-// ((a0 + a1) + (a2 + a3))
-// In hardware, this looks like:
-// a0 a1 a2 a3
-// |-+-| |-+-|
-// | |
-// |----+----|
-// |
-//
-// There are two values that need to be defined in the preprocessor.
-// BLOCK_SIZE
-// The dimension of the block used in the core computation
-// is BLOCK_SIZE x BLOCK_SIZE. This is defined in the host
-// include file because the host needs to know too (just to
-// ensure that the matrix sizes are a multiple of the block
-// size.
-// SIMD_WORK_ITEMS
-// This value tells the compiler how many work-items in the work-group
-// in a SIMD fashion. In the context of matrix multiplication, this
-// value indicates how many output elements will be computed
-// in a SIMD manner. BLOCK_SIZE must be a multiple of SIMD_WORK_ITEMS.
-// See the Optimization Guide for details about this attribute.
-//
-// The combination of these values determines the number of floating-point
-// operations per cycle.
-
-#include "matrixMult.h"
-
-#ifndef SIMD_WORK_ITEMS
-#define SIMD_WORK_ITEMS 4 // default value
-#endif
-
-__kernel
-__attribute((reqd_work_group_size(BLOCK_SIZE,BLOCK_SIZE,1)))
-__attribute((num_simd_work_items(SIMD_WORK_ITEMS)))
-void matrix_mult( // Input and output matrices
- __global float *restrict C,
- __global float *A,
- __global float *B,
- // Widths of matrices.
- int A_width, int B_width)
-{
- // Local storage for a block of input matrices A and B
- __local float A_local[BLOCK_SIZE][BLOCK_SIZE];
- __local float B_local[BLOCK_SIZE][BLOCK_SIZE];
-
- // Block index
- int block_x = get_group_id(0);
- int block_y = get_group_id(1);
-
- // Local ID index (offset within a block)
- int local_x = get_local_id(0);
- int local_y = get_local_id(1);
-
- // Compute loop bounds
- int a_start = A_width * BLOCK_SIZE * block_y;
- int a_end = a_start + A_width - 1;
- int b_start = BLOCK_SIZE * block_x;
-
- float running_sum = 0.0f;
-
- // Compute the matrix multiplication result for this output element. Each
- // loop iteration processes one block of the matrix.
- for (int a = a_start, b = b_start; a <= a_end; a += BLOCK_SIZE, b += (BLOCK_SIZE * B_width))
- {
- // Load the matrices to local memory. Note that the (x, y) indices
- // are swapped for A_local and B_local. This affects the reads from
- // A_local and B_local below and result in more efficient hardware.
- //
- // This is actually an optimization that the compiler can perform,
- // but is shown here for illustration purposes.
- A_local[local_y][local_x] = A[a + A_width * local_y + local_x];
- B_local[local_x][local_y] = B[b + B_width * local_y + local_x];
-
- // Wait for the entire block to be loaded.
- barrier(CLK_LOCAL_MEM_FENCE);
-
- // Do the dot product accumulation within this block. Fully unroll the loop.
- // As a result of the swap of indices above, memory accesses to
- // A_local and B_local are very efficient because each loop iteration
- // accesses consecutive elements. This can be seen by unrolling the
- // loop and analyzing the regions that are loaded:
- // A_local[local_y][0..BLOCK_SIZE-1] and
- // B_local[local_x][0..BLOCK_SIZE-1]
- __attribute__((opencl_unroll_hint()))
- for (int k = 0; k < BLOCK_SIZE; ++k)
- {
- running_sum += A_local[local_y][k] * B_local[local_x][k];
- }
-
- // Wait for the block to be fully consumed before loading the next
- // block.
- barrier(CLK_LOCAL_MEM_FENCE);
- }
-
- // Store result in matrix C
- C[get_global_id(1) * get_global_size(0) + get_global_id(0)] = running_sum;
-}
diff --git a/SDAccel/examples/3rd_party/matrix_mult/host/inc/matrixMult.h b/SDAccel/examples/3rd_party/matrix_mult/host/inc/matrixMult.h
deleted file mode 100644
index 76f8f0ad..00000000
--- a/SDAccel/examples/3rd_party/matrix_mult/host/inc/matrixMult.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2013-2016 Altera Corporation, San Jose, California, USA. All rights reserved.
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this
-// software and associated documentation files (the "Software"), to deal in the Software
-// without restriction, including without limitation the rights to use, copy, modify, merge,
-// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to
-// whom the Software is furnished to do so, subject to the following conditions:
-// The above copyright notice and this permission notice shall be included in all copies or
-// substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-// OTHER DEALINGS IN THE SOFTWARE.
-//
-// This agreement shall be governed in all respects by the laws of the State of California and
-// by the laws of the United States of America.
-
-#ifndef MATRIXMULT_H
-#define MATRIXMULT_H
-
-// Block size. Affects the kernel, so if this value changes, the kernel
-// needs to be recompiled.
-#ifndef BLOCK_SIZE
-#define BLOCK_SIZE 64 // default value
-#endif
-
-#endif
-
diff --git a/SDAccel/examples/3rd_party/matrix_mult/host/src/main.cpp b/SDAccel/examples/3rd_party/matrix_mult/host/src/main.cpp
deleted file mode 100644
index 4481372d..00000000
--- a/SDAccel/examples/3rd_party/matrix_mult/host/src/main.cpp
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright (C) 2013-2016 Altera Corporation, San Jose, California, USA. All rights reserved.
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this
-// software and associated documentation files (the "Software"), to deal in the Software
-// without restriction, including without limitation the rights to use, copy, modify, merge,
-// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to
-// whom the Software is furnished to do so, subject to the following conditions:
-// The above copyright notice and this permission notice shall be included in all copies or
-// substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-// OTHER DEALINGS IN THE SOFTWARE.
-//
-// This agreement shall be governed in all respects by the laws of the State of California and
-// by the laws of the United States of America.
-
-///////////////////////////////////////////////////////////////////////////////////
-// This host program executes a matrix multiplication kernel to perform:
-// C = A * B
-// where A is a N x K matrix, B is a K x M matrix and C is a N x M matrix.
-// All dimensions must be a multiple of BLOCK_SIZE, which affects the
-// underlying kernel.
-//
-// This host program supports partitioning the problem across multiple OpenCL
-// devices if available. If there are M available devices, the problem is
-// divided so that each device operates on N/M rows (with
-// processed by each device is . The host program
-// assumes that all devices are of the same type (that is, the same binary can
-// be used), but the code can be generalized to support different device types
-// easily.
-//
-// Verification is performed against the same computation on the host CPU.
-///////////////////////////////////////////////////////////////////////////////////
-
-#include
-#include
-#include
-#include "CL/opencl.h"
-#include "AOCLUtils/aocl_utils.h"
-#include "matrixMult.h"
-
-using namespace aocl_utils;
-
-// OpenCL runtime configuration
-cl_platform_id platform = NULL;
-unsigned num_devices = 0;
-scoped_array device; // num_devices elements
-cl_context context = NULL;
-scoped_array queue; // num_devices elements
-cl_program program = NULL;
-scoped_array kernel; // num_devices elements
-#if USE_SVM_API == 0
-scoped_array input_a_buf; // num_devices elements
-scoped_array input_b_buf; // num_devices elements
-scoped_array output_buf; // num_devices elements
-#endif /* USE_SVM_API == 0 */
-
-// Problem data.
-unsigned A_height = 32 * BLOCK_SIZE;
-unsigned A_width = 16 * BLOCK_SIZE;
-const unsigned &B_height = A_width;
-unsigned B_width = 16 * BLOCK_SIZE;
-const unsigned &C_height = A_height;
-const unsigned &C_width = B_width;
-std::string hwtype = "hw";
-
-#if USE_SVM_API == 0
-scoped_array > input_a; // num_devices elements
-scoped_aligned_ptr input_b;
-scoped_array > output; // num_devices elements
-#else
-scoped_array > input_a; // num_devices elements
-scoped_SVM_aligned_ptr input_b;
-scoped_array > output; // num_devices elements
-#endif /* USE_SVM_API == 0 */
-scoped_array ref_output;
-scoped_array rows_per_device; // num_devices elements
-
-// Function prototypes
-float rand_float();
-bool init_opencl();
-void init_problem();
-void run();
-void compute_reference();
-void verify();
-void cleanup();
-
-// Entry point.
-int main(int argc, char **argv) {
- Options options(argc, argv);
-
- if(options.has("ah")) {
- A_height = options.get("ah");
- }
- if(options.has("aw")) {
- A_width = options.get("aw");
- }
- if(options.has("bw")) {
- B_width = options.get("bw");
- }
- if(options.has("hw")) {
- hwtype = options.get("hw");
- }
-
- printf("Matrix sizes:\n A: %d x %d\n B: %d x %d\n C: %d x %d\n",
- A_height, A_width, B_height, B_width, C_height, C_width);
-
- // Spot check matrix sizes. They all must be a multiple of BLOCK_SIZE,
- // although it is relatively straightforward to handle non-multiples
- // by adding padding. For simplicity, this example does not pad.
- if((A_height % BLOCK_SIZE) != 0 || (A_width % BLOCK_SIZE) != 0 ||
- (B_height % BLOCK_SIZE) != 0 || (B_width % BLOCK_SIZE) != 0 ||
- (C_height % BLOCK_SIZE) != 0 || (C_width % BLOCK_SIZE) != 0) {
- printf("Matrix sizes must be a multiple of %d.\n", BLOCK_SIZE);
- return -1;
- }
-
- // Initialize OpenCL.
- if(!init_opencl()) {
- return -1;
- }
-
- // Initialize the problem data.
- // Requires the number of devices to be known.
- init_problem();
-
- // Run the kernel.
- run();
-
- // Free the resources allocated
- cleanup();
-
- return 0;
-}
-
-/////// HELPER FUNCTIONS ///////
-
-// Randomly generate a floating-point number between -10 and 10.
-float rand_float() {
- return float(rand()) / float(RAND_MAX) * 20.0f - 10.0f;
-}
-
-// Initializes the OpenCL objects.
-bool init_opencl() {
- cl_int status;
-
- printf("Initializing OpenCL\n");
-
- if(!setCwdToExeDir()) {
- return false;
- }
-
- // Get the OpenCL platform.
- platform = findPlatform("Xilinx");
- if(platform == NULL) {
- printf("ERROR: Unable to find Xilinx OpenCL platform.\n");
- return false;
- }
-
- // Query the available OpenCL device.
- device.reset(getDevices(platform, CL_DEVICE_TYPE_ALL, &num_devices));
- printf("Platform: %s\n", getPlatformName(platform).c_str());
- printf("Using %d device(s)\n", num_devices);
- for(unsigned i = 0; i < num_devices; ++i) {
- printf(" %s\n", getDeviceName(device[i]).c_str());
- }
-
- // Create the context.
- context = clCreateContext(NULL, num_devices, device, &oclContextCallback, NULL, &status);
- checkError(status, "Failed to create context");
-
- // Create the program for all device. Use the first device as the
- // representative device (assuming all device are of the same type).
- std::string fname = "xclbin/matrix_mult."+ hwtype + "." + VERSION_STR;
- std::string binary_file = getBoardBinaryFile(fname.c_str(), device[0]);
- printf("Using XCLBIN: %s\n", binary_file.c_str());
- program = createProgramFromBinary(context, binary_file.c_str(), device, num_devices);
-
- // Build the program that was just created.
- status = clBuildProgram(program, 0, NULL, "", NULL, NULL);
- checkError(status, "Failed to build program");
-
- // Create per-device objects.
- queue.reset(num_devices);
- kernel.reset(num_devices);
- rows_per_device.reset(num_devices);
-#if USE_SVM_API == 0
- input_a_buf.reset(num_devices);
- input_b_buf.reset(num_devices);
- output_buf.reset(num_devices);
-#endif /* USE_SVM_API == 0 */
-
- const unsigned num_block_rows = C_height / BLOCK_SIZE;
-
- for(unsigned i = 0; i < num_devices; ++i) {
- // Command queue.
- queue[i] = clCreateCommandQueue(context, device[i], CL_QUEUE_PROFILING_ENABLE, &status);
- checkError(status, "Failed to create command queue");
-
- // Kernel.
- const char *kernel_name = "matrix_mult";
- kernel[i] = clCreateKernel(program, kernel_name, &status);
- checkError(status, "Failed to create kernel");
-
- // Determine the number of rows processed by this device.
- // First do this computation in block-rows.
- rows_per_device[i] = num_block_rows / num_devices; // this is the number of block-rows
-
- // Spread out the remainder of the block-rows over the first
- // N % num_devices.
- if(i < (num_block_rows % num_devices)) {
- rows_per_device[i]++;
- }
-
- // Multiply by BLOCK_SIZE to get the actual number of rows.
- rows_per_device[i] *= BLOCK_SIZE;
-
-#if USE_SVM_API == 0
- // Input buffers.
- // For matrix A, each device only needs the rows corresponding
- // to the rows of the output matrix. We specifically
- // assign this buffer to the first bank of global memory.
- input_a_buf[i] = clCreateBuffer(context, CL_MEM_READ_ONLY , // remove for now // | CL_MEM_BANK_1_ALTERA,
- rows_per_device[i] * A_width * sizeof(float), NULL, &status);
- checkError(status, "Failed to create buffer for input A");
-
- // For matrix B, each device needs the whole matrix. We specifically
- // assign this buffer to the second bank of global memory.
- input_b_buf[i] = clCreateBuffer(context, CL_MEM_READ_ONLY, // remove for now // | CL_MEM_BANK_2_ALTERA,
- B_height * B_width * sizeof(float), NULL, &status);
- checkError(status, "Failed to create buffer for input B");
-
- // Output buffer. This is matrix C, for the rows that are computed by this
- // device. We assign this buffer to the first bank of global memory,
- // although it is not material to performance to do so because
- // the reads from the input matrices are far more frequent than the
- // write to the output matrix.
- output_buf[i] = clCreateBuffer(context, CL_MEM_WRITE_ONLY, // remove for now // | CL_MEM_BANK_1_ALTERA,
- rows_per_device[i] * C_width * sizeof(float), NULL, &status);
- checkError(status, "Failed to create buffer for output");
-#else
- cl_device_svm_capabilities caps = 0;
-
- status = clGetDeviceInfo(
- device[i],
- CL_DEVICE_SVM_CAPABILITIES,
- sizeof(cl_device_svm_capabilities),
- &caps,
- 0
- );
- checkError(status, "Failed to get device info");
-
- if (!(caps & CL_DEVICE_SVM_COARSE_GRAIN_BUFFER)) {
- printf("The host was compiled with USE_SVM_API, however the device currently being targeted does not support SVM.\n");
- // Free the resources allocated
- cleanup();
- return false;
- }
-#endif /* USE_SVM_API == 0 */
- }
-
- return true;
-}
-
-// Initialize the data for the problem. Requires num_devices to be known.
-void init_problem() {
- if(num_devices == 0) {
- checkError(-1, "No devices");
- }
-
- // Generate input matrices A and B. For matrix A, we divide up the host
- // buffers so that the buffers are aligned for each device. The whole of
- // matrix B is used by each device, so it does not need to be divided.
- printf("Generating input matrices\n");
- input_a.reset(num_devices);
- output.reset(num_devices);
-#if USE_SVM_API == 0
- for(unsigned i = 0; i < num_devices; ++i) {
- input_a[i].reset(rows_per_device[i] * A_width);
- output[i].reset(rows_per_device[i] * C_width);
-
- for(unsigned j = 0; j < rows_per_device[i] * A_width; ++j) {
- input_a[i][j] = rand_float();
- }
- }
-
- input_b.reset(B_height * B_width);
- for(unsigned i = 0; i < B_height * B_width; ++i) {
- input_b[i] = rand_float();
- }
-#else
- for(unsigned i = 0; i < num_devices; ++i) {
- input_a[i].reset(context, rows_per_device[i] * A_width);
- output[i].reset(context, rows_per_device[i] * C_width);
-
- cl_int status;
-
- status = clEnqueueSVMMap(queue[i], CL_TRUE, CL_MAP_WRITE,
- (void *)input_a[i], rows_per_device[i] * A_width * sizeof(float), 0, NULL, NULL);
- checkError(status, "Failed to map input A");
-
- for(unsigned j = 0; j < rows_per_device[i] * A_width; ++j) {
- input_a[i][j] = rand_float();
- }
-
- status = clEnqueueSVMUnmap(queue[i], (void *)input_a[i], 0, NULL, NULL);
- checkError(status, "Failed to unmap input A");
- }
-
- input_b.reset(context, B_height * B_width);
-
- cl_int status;
-
- for (unsigned i = 0; i < num_devices; ++i) {
- status = clEnqueueSVMMap(queue[i], CL_TRUE, CL_MAP_WRITE,
- (void *)input_b, B_height * B_width * sizeof(float), 0, NULL, NULL);
- checkError(status, "Failed to map input B");
- }
-
- for(unsigned i = 0; i < B_height * B_width; ++i) {
- input_b[i] = rand_float();
- }
-
- for (unsigned i = 0; i < num_devices; ++i) {
- status = clEnqueueSVMUnmap(queue[i], (void *)input_b, 0, NULL, NULL);
- checkError(status, "Failed to unmap input B");
- }
-#endif /* USE_SVM_API == 0 */
-}
-
-void run() {
- cl_int status;
-
-#if USE_SVM_API == 0
- // Transfer inputs to each device. Each of the host buffers supplied to
- // clEnqueueWriteBuffer here is already aligned to ensure that DMA is used
- // for the host-to-device transfer.
- for(unsigned i = 0; i < num_devices; ++i) {
- status = clEnqueueWriteBuffer(queue[i], input_a_buf[i], CL_FALSE,
- 0, rows_per_device[i] * A_width * sizeof(float), input_a[i], 0, NULL, NULL);
- checkError(status, "Failed to transfer input A");
-
- status = clEnqueueWriteBuffer(queue[i], input_b_buf[i], CL_FALSE,
- 0, B_width * B_height * sizeof(float), input_b, 0, NULL, NULL);
- checkError(status, "Failed to transfer input B");
- }
-
- // Wait for all queues to finish.
- for(unsigned i = 0; i < num_devices; ++i) {
- clFinish(queue[i]);
- }
-#endif /* USE_SVM_API == 0 */
-
- // Launch kernels.
- // This is the portion of time that we'll be measuring for throughput
- // benchmarking.
- scoped_array kernel_event(num_devices);
-
- const double start_time = getCurrentTimestamp();
- for(unsigned i = 0; i < num_devices; ++i) {
- // Set kernel arguments.
- unsigned argi = 0;
-
-#if USE_SVM_API == 0
- status = clSetKernelArg(kernel[i], argi++, sizeof(cl_mem), &output_buf[i]);
- checkError(status, "Failed to set argument %d", argi - 1);
-
- status = clSetKernelArg(kernel[i], argi++, sizeof(cl_mem), &input_a_buf[i]);
- checkError(status, "Failed to set argument %d", argi - 1);
-
- status = clSetKernelArg(kernel[i], argi++, sizeof(cl_mem), &input_b_buf[i]);
- checkError(status, "Failed to set argument %d", argi - 1);
-#else
- status = clSetKernelArgSVMPointer(kernel[i], argi++, (void*)output[i]);
- checkError(status, "Failed to set argument %d", argi - 1);
-
- status = clSetKernelArgSVMPointer(kernel[i], argi++, (void*)input_a[i]);
- checkError(status, "Failed to set argument %d", argi - 1);
-
- status = clSetKernelArgSVMPointer(kernel[i], argi++, (void*)input_b);
- checkError(status, "Failed to set argument %d", argi - 1);
-#endif /* USE_SVM_API == 0 */
-
- status = clSetKernelArg(kernel[i], argi++, sizeof(A_width), &A_width);
- checkError(status, "Failed to set argument %d", argi - 1);
-
- status = clSetKernelArg(kernel[i], argi++, sizeof(B_width), &B_width);
- checkError(status, "Failed to set argument %d", argi - 1);
-
- // Enqueue kernel.
- // Use a global work size corresponding to the size of the output matrix.
- // Each work-item computes the result for one value of the output matrix,
- // so the global work size has the same dimensions as the output matrix.
- //
- // The local work size is one block, so BLOCK_SIZE x BLOCK_SIZE.
- //
- // Events are used to ensure that the kernel is not launched until
- // the writes to the input buffers have completed.
- const size_t global_work_size[2] = {C_width, rows_per_device[i]};
- const size_t local_work_size[2] = {BLOCK_SIZE, BLOCK_SIZE};
- printf("Launching for device %d (global size: %zd, %zd)\n", i, global_work_size[0], global_work_size[1]);
-
- status = clEnqueueNDRangeKernel(queue[i], kernel[i], 2, NULL,
- global_work_size, local_work_size, 0, NULL, &kernel_event[i]);
- checkError(status, "Failed to launch kernel");
- }
-
- // Wait for all kernels to finish.
- clWaitForEvents(num_devices, kernel_event);
-
- const double end_time = getCurrentTimestamp();
- const double total_time = end_time - start_time;
-
- // Wall-clock time taken.
- printf("\nTime: %0.3f ms\n", total_time * 1e3);
-
- // Get kernel times using the OpenCL event profiling API.
- for(unsigned i = 0; i < num_devices; ++i) {
- cl_ulong time_ns = getStartEndTime(kernel_event[i]);
- printf("Kernel time (device %d): %0.3f ms\n", i, double(time_ns) * 1e-6);
- }
-
- // Compute the throughput (GFLOPS).
- // There are C_width * C_height output values, with each value
- // computed using A_width multiplies and adds.
- const float flops = (float)(2.0f * C_width * C_height * A_width / total_time);
- printf("\nThroughput: %0.2f GFLOPS\n\n", flops * 1e-9);
-
- // Release kernel events.
- for(unsigned i = 0; i < num_devices; ++i) {
- clReleaseEvent(kernel_event[i]);
- }
-
- // Read the result.
- for(unsigned i = 0; i < num_devices; ++i) {
-#if USE_SVM_API == 0
- status = clEnqueueReadBuffer(queue[i], output_buf[i], CL_TRUE,
- 0, rows_per_device[i] * C_width * sizeof(float), output[i], 0, NULL, NULL);
- checkError(status, "Failed to read output matrix");
-#else
- status = clEnqueueSVMMap(queue[i], CL_TRUE, CL_MAP_READ,
- (void *)output[i], rows_per_device[i] * C_width * sizeof(float), 0, NULL, NULL);
- checkError(status, "Failed to map output");
-#endif /* USE_SVM_API == 0 */
- }
-
- // Verify results.
- compute_reference();
- verify();
-#if USE_SVM_API == 1
- for (unsigned i = 0; i < num_devices; ++i) {
- status = clEnqueueSVMUnmap(queue[i], (void *)output[i], 0, NULL, NULL);
- checkError(status, "Failed to unmap output");
- }
-#endif /* USE_SVM_API == 1 */
-}
-
-void compute_reference() {
- // Compute the reference output.
- printf("Computing reference output\n");
- ref_output.reset(C_height * C_width);
-
- for(unsigned y = 0, dev_index = 0; y < C_height; ++dev_index) {
- for(unsigned yy = 0; yy < rows_per_device[dev_index]; ++yy, ++y) {
- for(unsigned x = 0; x < C_width; ++x) {
- // Compute result for C(y, x)
- float sum = 0.0f;
- for(unsigned k = 0; k < A_width; ++k) {
- sum += input_a[dev_index][yy * A_width + k] * input_b[k * B_width + x];
- }
- ref_output[y * C_width + x] = sum;
- }
- }
- }
-}
-
-void verify() {
- printf("Verifying\n");
-
- // Compute the L^2-Norm of the difference between the output and reference
- // output matrices and compare it against the L^2-Norm of the reference.
- float diff = 0.0f;
- float ref = 0.0f;
- for(unsigned y = 0, dev_index = 0; y < C_height; ++dev_index) {
- for(unsigned yy = 0; yy < rows_per_device[dev_index]; ++yy, ++y) {
- for(unsigned x = 0; x < C_width; ++x) {
- const float o = output[dev_index][yy * C_width + x];
- const float r = ref_output[y * C_width + x];
- const float d = o - r;
- diff += d * d;
- ref += r * r;
- }
- }
- }
-
- const float diff_l2norm = sqrtf(diff);
- const float ref_l2norm = sqrtf(ref);
- const float error = diff_l2norm / ref_l2norm;
- const bool pass = error < 1e-6;
- printf("Verification: %s\n", pass ? "PASS" : "FAIL");
- if(!pass) {
- printf("Error (L^2-Norm): %0.3g\n", error);
- }
-}
-
-// Free the resources allocated during initialization
-void cleanup() {
- for(unsigned i = 0; i < num_devices; ++i) {
- if(kernel && kernel[i]) {
- clReleaseKernel(kernel[i]);
- }
- if(queue && queue[i]) {
- clReleaseCommandQueue(queue[i]);
- }
-#if USE_SVM_API == 0
- if(input_a_buf && input_a_buf[i]) {
- clReleaseMemObject(input_a_buf[i]);
- }
- if(input_b_buf && input_b_buf[i]) {
- clReleaseMemObject(input_b_buf[i]);
- }
- if(output_buf && output_buf[i]) {
- clReleaseMemObject(output_buf[i]);
- }
-#else
- if(input_a[i].get())
- input_a[i].reset();
- if(output[i].get())
- output[i].reset();
-#endif /* USE_SVM_API == 0 */
- }
-#if USE_SVM_API == 1
- if(input_b.get())
- input_b.reset();
-#endif /* USE_SVM_API == 1 */
-
- if(program) {
- clReleaseProgram(program);
- }
- if(context) {
- clReleaseContext(context);
- }
-}
-
diff --git a/SDAccel/examples/3rd_party/vector_addition/vector_addition_main.cpp.diff b/SDAccel/examples/3rd_party/vector_addition/vector_addition_main.cpp.diff
index 9d850916..94075d89 100644
--- a/SDAccel/examples/3rd_party/vector_addition/vector_addition_main.cpp.diff
+++ b/SDAccel/examples/3rd_party/vector_addition/vector_addition_main.cpp.diff
@@ -1,22 +1,45 @@
---- third_party/vector_add/host/src/main.cpp 2017-05-09 22:47:50.000000000 +0000
-+++ sdaccel/vector_add/host/src/main.cpp 2017-09-12 18:55:22.172000000 +0000
-@@ -70,0 +71,2 @@
+--- third_party/vector_add/host/src/main.cpp 2018-02-12 17:55:18.000000000 +0000
++++ aws/vector_add/host/src/main.cpp 2019-09-15 22:53:15.593553611 +0000
+@@ -67,6 +67,7 @@
+ #endif /* USE_SVM_API == 0 */
+ scoped_array > ref_output; // num_devices elements
+ scoped_array n_per_device; // num_devices elements
+std::string hwtype = "hw";
-+
-@@ -85,0 +88,3 @@
+
+ // Function prototypes
+ float rand_float();
+@@ -84,6 +85,9 @@
+ N = options.get("n");
+ }
+
+ if(options.has("hw")) {
+ hwtype = options.get("hw");
+ }
-@@ -123 +128 @@
-- platform = findPlatform("Intel");
-+ platform = findPlatform("Xilinx");
-@@ -125 +130 @@
-- printf("ERROR: Unable to find Intel FPGA OpenCL platform.\n");
+ // Initialize OpenCL.
+ if(!init_opencl()) {
+ return -1;
+@@ -120,9 +124,9 @@
+ }
+
+ // Get the OpenCL platform.
+- platform = findPlatform("Intel(R) FPGA SDK for OpenCL(TM)");
++ platform = findPlatform("Xilinx");
+ if(platform == NULL) {
+- printf("ERROR: Unable to find Intel(R) FPGA OpenCL platform.\n");
+ printf("ERROR: Unable to find Xilinx FPGA OpenCL platform.\n");
-@@ -143,2 +148,4 @@
+ return false;
+ }
+
+@@ -140,8 +144,10 @@
+
+ // Create the program for all device. Use the first device as the
+ // representative device (assuming all device are of the same type).
- std::string binary_file = getBoardBinaryFile("vector_add", device[0]);
- printf("Using AOCX: %s\n", binary_file.c_str());
+ std::string fname = "xclbin/vector_add."+ hwtype + "." + VERSION_STR;
+ printf("Looking for %s.\n",fname.c_str());
+ std::string binary_file = getBoardBinaryFile(fname.c_str(), device[0]);
+ printf("Using XCLBIN: %s\n", binary_file.c_str());
+ program = createProgramFromBinary(context, binary_file.c_str(), device, num_devices);
+
+ // Build the program that was just created.
diff --git a/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/helloworld b/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/helloworld
old mode 100644
new mode 100755
similarity index 100%
rename from SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/helloworld
rename to SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/helloworld
diff --git a/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/helloworld_ocl_afi-ids.txt b/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/helloworld_ocl_afi-ids.txt
similarity index 100%
rename from SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/helloworld_ocl_afi-ids.txt
rename to SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/helloworld_ocl_afi-ids.txt
diff --git a/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/helloworld_ocl_agfi-ids.txt b/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/helloworld_ocl_agfi-ids.txt
similarity index 100%
rename from SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/helloworld_ocl_agfi-ids.txt
rename to SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/helloworld_ocl_agfi-ids.txt
diff --git a/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/sdaccel.ini b/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/sdaccel.ini
new file mode 100644
index 00000000..c75131c1
--- /dev/null
+++ b/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/sdaccel.ini
@@ -0,0 +1,2 @@
+[Debug]
+profile=true
diff --git a/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/vector_addition.hw.xilinx_aws-vu9p-f1-04261818_dynamic_5_0.awsxclbin b/SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/vector_addition.hw.xilinx_aws-vu9p-f1-04261818_dynamic_5_0.awsxclbin
similarity index 100%
rename from SDAccel/examples/aws/helloworld_ocl_runtime/2018.3/vector_addition.hw.xilinx_aws-vu9p-f1-04261818_dynamic_5_0.awsxclbin
rename to SDAccel/examples/aws/helloworld_ocl_runtime/2018.3_2019.1/vector_addition.hw.xilinx_aws-vu9p-f1-04261818_dynamic_5_0.awsxclbin
diff --git a/SDAccel/examples/aws/helloworld_ocl_runtime/README.md b/SDAccel/examples/aws/helloworld_ocl_runtime/README.md
index 1782662b..a8c9ad2a 100644
--- a/SDAccel/examples/aws/helloworld_ocl_runtime/README.md
+++ b/SDAccel/examples/aws/helloworld_ocl_runtime/README.md
@@ -31,23 +31,19 @@ vector_addition.hw.xilinx_aws-vu9p-f1-04261818_dynamic_5_0.awsxclbin --awsxclbin
## Execution
-
-#### :exclamation: PLEASE NOTE: xclbin & awsxclbin file formats have changed for SDx 2018.3. xclbin & awsxclbin files generated using earlier SDx versions are not compatible with 2018.3 based XRTs. If you are using a 2018.3 based XRT, please copy over awsxclbin & helloworld executable files provided in the 2018.3 subdirectory to this folder.
-
-Command sequence
+#### :exclamation: PLEASE NOTE: xclbin & awsxclbin file formats have changed from SDx 2018.3 onwards. xclbin & awsxclbin files generated using earlier SDx versions are not compatible with 2018.3/2019.1 based XRTs. If you are using a 2018.3/2019.1 based XRT, please copy over awsxclbin & helloworld executable files provided in the 2018.3_2019.1 subdirectory to this folder.
```
sudo fpga-clear-local-image -S 0
- >>$sudo sh
-sh-4.2# source $AWS_FPGA_REPO_DIR/sdaccel_runtime_setup.sh
-sh-4.2# ./helloworld
-
+sudo -E /bin/bash
+source $AWS_FPGA_REPO_DIR/sdaccel_runtime_setup.sh
+./helloworld
```
## Hello World Example Metadata
-| Key | Region | Value for 2017.4 or 2018.2 | Value for 2018.3 |
+| Key | Region | SDx 2017.4 or 2018.2 | SDx 2018.3 or 2019.1 |
|--------|---------|-----------------------------|------------------|
|afi id | us-east-1(N. Virginia) | afi-0532379b26ea13f26 | afi-0c8210915ce9bab5c |
|afi id | us-west-2(oregon) | afi-0ab098d3fbfc43c7e | afi-01e237aa978aa74de |
diff --git a/SDAccel/examples/aws/helloworld_ocl_runtime/sdaccel.ini b/SDAccel/examples/aws/helloworld_ocl_runtime/sdaccel.ini
new file mode 100644
index 00000000..c75131c1
--- /dev/null
+++ b/SDAccel/examples/aws/helloworld_ocl_runtime/sdaccel.ini
@@ -0,0 +1,2 @@
+[Debug]
+profile=true
diff --git a/SDAccel/examples/xilinx_2017.4 b/SDAccel/examples/xilinx_2017.4
deleted file mode 160000
index cd196250..00000000
--- a/SDAccel/examples/xilinx_2017.4
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit cd196250dfdd63491080e8c6f3e79fe6d1718997
diff --git a/SDAccel/examples/xilinx_2018.2 b/SDAccel/examples/xilinx_2018.2
deleted file mode 160000
index 70a0f3ed..00000000
--- a/SDAccel/examples/xilinx_2018.2
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 70a0f3edc6d78f3de13806ec7a7a01d1fbe0d2bd
diff --git a/SDAccel/examples/xilinx_2018.3 b/SDAccel/examples/xilinx_2018.3
deleted file mode 160000
index b2884db9..00000000
--- a/SDAccel/examples/xilinx_2018.3
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b2884db9768d6589ae094cd06d9b491b3bd39816
diff --git a/SDAccel/examples/xilinx_2019.1 b/SDAccel/examples/xilinx_2019.1
new file mode 160000
index 00000000..0ec1aef5
--- /dev/null
+++ b/SDAccel/examples/xilinx_2019.1
@@ -0,0 +1 @@
+Subproject commit 0ec1aef54f3bf17c78581630d687b13fadae9616
diff --git a/SDAccel/kernel_version.txt b/SDAccel/kernel_version.txt
index 2ecfd922..20429c73 100644
--- a/SDAccel/kernel_version.txt
+++ b/SDAccel/kernel_version.txt
@@ -2,3 +2,6 @@
3.10.0-693.21.1.el7.x86_64
3.10.0-957.1.3.el7.x86_64
3.10.0-957.5.1.el7.x86_64
+3.10.0-957.27.2.el7.x86_64
+3.10.0-1062.4.1.el7.x86_64
+3.10.0-1062.9.1.el7.x86_64
\ No newline at end of file
diff --git a/SDAccel/sdaccel_xrt_version.txt b/SDAccel/sdaccel_xrt_version.txt
index 4518becc..7322b406 100644
--- a/SDAccel/sdaccel_xrt_version.txt
+++ b/SDAccel/sdaccel_xrt_version.txt
@@ -1,4 +1,6 @@
2018.2:343186f76f59edd01bc48d84cf67fe22a0a3f338
2018.2:65ffad62f427c0bd1bc65b6ea555a810295468b7
-2018.3:3636217b633930ed4815abd598324691ca25c2f3
-2018.3:48cafdc100b29843fd013d371ffba0141db06b7a
+2018.3:8654da1f0d2bd196c9887bdcfe1479103a93e90a
+2019.1:e21b8a5b208618834760593bbb15063f7e399642
+2019.1:dd210161e204e882027d22132725d8ffdf285149
+2019.1:89e25d51313daac5c322dfb4e84707829306d3fe
\ No newline at end of file
diff --git a/SDAccel/tests/test_find_sdaccel_examples.py b/SDAccel/tests/test_find_sdaccel_examples.py
index f0c5c758..4d9f1c94 100644
--- a/SDAccel/tests/test_find_sdaccel_examples.py
+++ b/SDAccel/tests/test_find_sdaccel_examples.py
@@ -44,6 +44,8 @@ class TestFindSDAccelExamples(AwsFpgaTestBase):
NOTE: Cannot have an __init__ method.
'''
+ ADD_XILINX_VERSION = True
+
@classmethod
def setup_class(cls):
'''
@@ -52,7 +54,7 @@ def setup_class(cls):
AwsFpgaTestBase.setup_class(cls, __file__)
return
- def test_find_example_makefiles(self):
+ def test_find_example_makefiles(self, xilinxVersion):
assert os.path.exists(self.xilinx_sdaccel_examples_dir), "The Xilinx SDAccel example dir does not exist: {}".format(self.xilinx_sdaccel_examples_dir)
assert os.listdir(self.xilinx_sdaccel_examples_dir) != [], "Xilinx SDAccel example submodule not cloned or does not exist"
@@ -61,13 +63,34 @@ def test_find_example_makefiles(self):
xilinx_sdaccel_example_map = {}
for root, dirs, files in os.walk(self.xilinx_sdaccel_examples_dir):
- for file in files:
- if file.endswith('Makefile'):
- makefile_path = root + "/Makefile"
-
- # If the Makefile has a docs target, it's not the makefile we want to read
- if 'docs:' not in open(makefile_path).read():
- xilinx_examples_makefiles.append(root)
+ ignore = False
+
+ if os.path.exists(root + "/description.json") and os.path.exists(root + "/Makefile"):
+ with open(root + "/description.json", "r") as description_file:
+ description = json.load(description_file)
+
+ if "containers" in description:
+ if len(description["containers"]) > 1:
+ ignore = True
+ logger.info("Ignoring {} as >1 containers found in description.json.".format(root))
+
+ else:
+ ignore = True
+ logger.info("Ignoring {} as no containers found in description.json.".format(root))
+ continue
+
+ if "nboard" in description:
+ if "xilinx_aws-vu9p-f1-04261818" in description["nboard"]:
+ ignore = True
+ logger.info("Ignoring {} as F1 device found in nboard.".format(root))
+ continue
+ else:
+ ignore = True
+ logger.warn("Ignoring: {} as no Makefile/description.json exist".format(root))
+
+ if not ignore:
+ xilinx_examples_makefiles.append(root)
+ logger.info("Adding: " + root)
assert len(xilinx_examples_makefiles) != 0, "Could not find any Xilinx SDAccel example in %s" % self.xilinx_sdaccel_examples_dir
@@ -84,4 +107,8 @@ def test_find_example_makefiles(self):
with open(self.xilinx_sdaccel_examples_list_file, 'w') as outfile:
json.dump(xilinx_sdaccel_example_map, outfile)
+ # Also write the archive file
+ with open(self.xilinx_sdaccel_examples_list_file + "." + xilinxVersion, 'w') as archive_file:
+ json.dump(xilinx_sdaccel_example_map, archive_file)
+
assert os.path.getsize(self.xilinx_sdaccel_examples_list_file) > 0, "%s is a non zero file. We need to have some data in the file" % self.xilinx_sdaccel_examples_list_file
diff --git a/SDAccel/tests/test_run_sdaccel_example.py b/SDAccel/tests/test_run_sdaccel_example.py
index 4bf1ecaf..201fb5c1 100644
--- a/SDAccel/tests/test_run_sdaccel_example.py
+++ b/SDAccel/tests/test_run_sdaccel_example.py
@@ -68,13 +68,14 @@ def setup_class(cls):
return
+ @pytest.mark.flaky(reruns=2, reruns_delay=2)
def test_run_sdaccel_example(self, examplePath, rteName, xilinxVersion):
os.chdir(self.get_sdaccel_example_fullpath(examplePath))
(rc, stdout_lines, stderr_lines) = self.run_cmd("make exe")
assert rc == 0
- em_run_cmd = self.get_sdaccel_example_run_cmd(examplePath)
+ em_run_cmd = self.get_sdaccel_example_run_cmd(examplePath, xilinxVersion)
check_runtime_script = os.path.join(AwsFpgaTestBase.WORKSPACE,'sdaccel_runtime_setup.sh')
self.get_sdaccel_aws_xclbin_file(examplePath, rteName, xilinxVersion)
diff --git a/SDAccel/tools/awssak/Makefile b/SDAccel/tools/awssak/Makefile
index 8b1fff39..019ae6fa 100644
--- a/SDAccel/tools/awssak/Makefile
+++ b/SDAccel/tools/awssak/Makefile
@@ -24,14 +24,15 @@ CXX := g++
CXXFLAGS := -Wall -Werror -std=c++11
ROOT = $(SDACCEL_DIR)
-HAL_INC := -I../../include -I$(ROOT)/userspace/include
+HAL_INC := -I$(SDACCEL_DIR)/userspace/src -I$(SDACCEL_DIR)/userspace/include -I$(SDK_DIR)/userspace/include -I$(SDK_DIR)/linux_kernel_drivers
CXXFLAGS += $(HAL_INC)
ifeq ($(ec2),1)
-AWSBM_HAL_LIBNAME := $(ROOT)/userspace/src/libawsxcldrv.a
+AWS_HAL_LIBNAME := $(ROOT)/userspace/src/libxrt-aws.a
else
-AWSBM_HAL_LIBNAME := $(ROOT)/userspace/src/libawsbmdrv.a
+AWS_HAL_LIBNAME := $(ROOT)/userspace/src/libxrtbm-aws.a
+CXXFLAGS += -DINTERNAL_RELEASE
endif
ifeq ($(debug),1)
@@ -59,8 +60,8 @@ all : $(EXENAME)
$(CXX) $(CXXFLAGS) $(MYCFLAGS) $(MYCXXFLAGS) -c $< -o $@
$(CXX) $(CXXFLAGS) $(MYCFLAGS) $(MYCXXFLAGS) -c -MM $< -o $(patsubst %.o, %.d, $@)
-$(EXENAME): $(OBJS) $(AWSBM_HAL_LIBNAME)
- $(CXX) -o $@ $(OBJS) $(AWSBM_HAL_LIBNAME) $(LDFLAGS) $(LDLIBS) -lrt
+$(EXENAME): $(OBJS) $(AWS_HAL_LIBNAME)
+ $(CXX) -o $@ $(OBJS) $(AWS_HAL_LIBNAME) $(LDFLAGS) $(LDLIBS) -lrt -pthread
clean:
rm -rf *.o *.d $(EXENAME)
diff --git a/SDAccel/tools/awssak/main.cpp b/SDAccel/tools/awssak/main.cpp
index 91f95bd6..09b96f57 100644
--- a/SDAccel/tools/awssak/main.cpp
+++ b/SDAccel/tools/awssak/main.cpp
@@ -1,9 +1,7 @@
/**
* Copyright (C) 2017-2018 Xilinx, Inc
- * Author: Sonal Santan
- * Simple command line utility to interact with SDX PCIe devices
- *
- * Code copied verbatim from SDAccel xbsak implementation
+ * Author: Sonal Santan, Ryan Radjabi
+ * Simple command line utility to interact with SDX PCIe devices
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may
* not use this file except in compliance with the License. A copy of the
@@ -18,921 +16,9 @@
* under the License.
*/
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include