espressomd / walberla_for_es · Commits

Commit 6495a79a
Authored Jan 26, 2019 by Michael Kuron
Parent: ddc0a76e

Replace deprecated CUDA API calls

1 changed file: src/cuda/Kernel.h
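Background: the deprecated execution-configuration API staged a launch across three runtime calls (cudaConfigureCall, cudaSetupArgument, cudaLaunch), with the caller computing each argument's byte offset by hand. Its replacement, cudaLaunchKernel, takes the whole launch configuration plus an array of pointers to the argument values in a single call. A minimal sketch of the new-style launch (not part of this commit; the kernel and its arguments are hypothetical):

#include <cuda_runtime.h>

__global__ void myKernel( int n, float * data );   // hypothetical kernel

void launch( int n, float * d_data, dim3 grid, dim3 block,
             size_t sharedMem, cudaStream_t stream )
{
   // One pointer per kernel argument, in signature order; the runtime
   // reads each argument's size and alignment from the kernel itself.
   void * args[] = { &n, &d_data };
   cudaLaunchKernel( (void *) myKernel, grid, block, args, sharedMem, stream );
}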
@@ -107,9 +107,6 @@ namespace cuda {
    protected:

-      template<typename T>
-      size_t determineNextOffset();
-
       //** Members **********************************************************************************************
       /*! \name Members */
       //@{
@@ -120,11 +117,7 @@ namespace cuda {
       dim3 blockDim_;
       std::size_t sharedMemSize_;

-      struct ParamInfo {
-         std::vector<char> data;
-         size_t offset;
-      };
-      std::vector< ParamInfo > params_;
+      std::vector< std::vector<char> > params_;
       //@}
       //****************************************************************************************************************
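With the three-stage API gone there is no per-argument offset to track: cudaLaunchKernel only needs a pointer to each argument's bytes, so one byte buffer per argument suffices. A standalone sketch of the storage idea (hypothetical helper, not part of the file):

#include <cstring>
#include <vector>

// One heap-allocated byte buffer per kernel argument; the buffer address
// (data()) is what gets handed to cudaLaunchKernel at launch time.
std::vector< std::vector<char> > params;

template< typename T >
void storeArg( const T & value )   // hypothetical helper
{
   std::vector<char> bytes( sizeof(T) );
   std::memcpy( bytes.data(), &value, sizeof(T) );   // T should be trivially copyable
   params.push_back( std::move( bytes ) );
}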
@@ -187,10 +180,9 @@ namespace cuda {
    template<typename T>
    void Kernel<FP>::addParam( const T & param )
    {
-      ParamInfo paramInfo;
-      paramInfo.data.resize( sizeof(T) );
-      std::memcpy( &(paramInfo.data[0]), &param, sizeof(T) );
-      paramInfo.offset = determineNextOffset<T>();
+      std::vector<char> paramInfo;
+      paramInfo.resize( sizeof(T) );
+      std::memcpy( paramInfo.data(), &param, sizeof(T) );

       WALBERLA_ASSERT( checkParameter<T>( params_.size() + 1 ),
                        "cuda::Kernel type mismatch of parameter " << params_.size() + 1 );
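For context, a hedged usage sketch of the wrapper; the make_kernel factory and configure setter are assumed from the surrounding header (they are not shown in this diff), and the kernel itself is hypothetical:

__global__ void scaleKernel( float * data, float factor );   // hypothetical

void runScale( float * d_data, float factor, cudaStream_t stream )
{
   auto kernel = cuda::make_kernel( &scaleKernel );
   kernel.configure( dim3( 64 ), dim3( 256 ) );   // grid and block dimensions
   kernel.addParam( d_data );   // each call byte-copies one argument into params_
   kernel.addParam( factor );
   kernel( stream );            // operator() issues the launch
}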
@@ -231,28 +223,21 @@ namespace cuda {
    void Kernel<FP>::operator() ( cudaStream_t stream ) const
    {
       // check for correct number of parameter calls
       if ( params_.size() != boost::function_traits<FuncType>::arity )
       {
          WALBERLA_ABORT( "Error when calling cuda::Kernel - Wrong number of arguments. " <<
                          "Expected " << boost::function_traits<FuncType>::arity <<
                          ", received " << params_.size() );
       }

-      // set the number of blocks and threads,
-      WALBERLA_CUDA_CHECK( cudaConfigureCall( gridDim_, blockDim_, sharedMemSize_, stream ) );
-
       // register all parameters
+      std::vector<void*> args;
       for( auto paramIt = params_.begin(); paramIt != params_.end(); ++paramIt )
       {
-         const void * ptr = &(paramIt->data[0]);
-         WALBERLA_CUDA_CHECK( cudaSetupArgument( ptr, paramIt->data.size(), paramIt->offset ) );
+         args.push_back( const_cast<char*>( paramIt->data() ) );
       }

       // .. and launch the kernel
-      static_assert( sizeof(void *) == sizeof(void (*)(void)),
-                     "object pointer and function pointer sizes must be equal" );
-      // dirty casting trick to circumvent compiler warning
-      // essentially the next two lines are: cudaLaunch( funcPtr_ );
-      void *q = (void*) &funcPtr_;
-      WALBERLA_CUDA_CHECK( cudaLaunch( (const char*) ( *static_cast<void **>(q) ) ) );
+      WALBERLA_CUDA_CHECK( cudaLaunchKernel( (void*) funcPtr_, gridDim_, blockDim_,
+                                             args.data(), sharedMemSize_, stream ) );
    }
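The replacement collapses configuration, per-argument setup, and launch into one runtime call, which also removes the function-pointer casting trick that cudaLaunch required. A self-contained example of the cudaLaunchKernel signature used above (hypothetical kernel, compiled with nvcc):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void fillKernel( float * out, float value, int n )
{
   int i = blockIdx.x * blockDim.x + threadIdx.x;
   if ( i < n ) out[i] = value;
}

int main()
{
   const int n = 1024;
   float * d_out = nullptr;
   cudaMalloc( &d_out, n * sizeof(float) );

   // cudaLaunchKernel( func, gridDim, blockDim, args, sharedMem, stream ):
   // args holds one pointer per argument value, in signature order.
   float value = 1.0f;
   int   count = n;
   void * args[] = { &d_out, &value, &count };
   cudaError_t err = cudaLaunchKernel( (void *) fillKernel,
                                       dim3( n / 256 ), dim3( 256 ),
                                       args, 0 /*sharedMem*/, 0 /*stream*/ );
   std::printf( "launch: %s\n", cudaGetErrorString( err ) );

   cudaFree( d_out );
   return 0;
}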
@@ -276,19 +261,6 @@ namespace cuda {
    }

-   template<typename FP>
-   template<typename T>
-   size_t Kernel<FP>::determineNextOffset()
-   {
-      size_t currentOffset = 0;
-      if ( !params_.empty() )
-         currentOffset = params_.back().offset + params_.back().data.size();
-
-      size_t alignment = __alignof( T );
-      return (currentOffset + alignment - 1) & ~(alignment - 1);
-   }

 } // namespace cuda
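The deleted helper rounded the running byte offset up to the next multiple of the argument's alignment, which the explicit offsets of cudaSetupArgument required; cudaLaunchKernel derives the argument layout from the kernel signature, so the bookkeeping can go. A worked example of the round-up expression (standalone sketch):

#include <cstddef>
#include <cstdio>

// Rounds offset up to the next multiple of alignment; alignment must be
// a power of two for the bit mask to work.
std::size_t roundUp( std::size_t offset, std::size_t alignment )
{
   return ( offset + alignment - 1 ) & ~( alignment - 1 );
}

int main()
{
   std::printf( "%zu\n", roundUp( 13, 8 ) );   // (13 + 7) & ~7 = 16
   std::printf( "%zu\n", roundUp( 16, 8 ) );   // already aligned: stays 16
   return 0;
}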